From d99d3ed2a9e992de40a989c9f574dde3b50c4774 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Mart=C3=ADn-Gonz=C3=A1lez?= <3705952+unpollito@users.noreply.github.com> Date: Wed, 21 Jan 2026 15:48:20 +0100 Subject: [PATCH 001/561] Revert "Install buildx on dind" This reverts commit 44a899fc2a04f9a14f2e03cde94aba8f79c6f666. --- runner/Makefile | 8 -------- ...-runner-dind-rootless.ubuntu-20.04.dockerfile | 11 +---------- ...-runner-dind-rootless.ubuntu-22.04.dockerfile | 11 +---------- .../actions-runner-dind.ubuntu-20.04.dockerfile | 13 +------------ .../actions-runner-dind.ubuntu-22.04.dockerfile | 16 +--------------- 5 files changed, 4 insertions(+), 55 deletions(-) diff --git a/runner/Makefile b/runner/Makefile index 8f5df1f39b..3dffc9a08b 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -9,7 +9,6 @@ TARGETPLATFORM ?= $(shell arch) RUNNER_VERSION ?= 2.299.1 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.1.3 DOCKER_VERSION ?= 20.10.21 -DOCKER_BUILDX_VERSION ?= 0.6.3 # default list of platforms for which multiarch image is built ifeq (${PLATFORMS}, ) @@ -53,14 +52,12 @@ docker-build-set: check-target-platform --build-arg RUNNER_VERSION=${RUNNER_VERSION} \ --build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \ --build-arg DOCKER_VERSION=${DOCKER_VERSION} \ - --build-arg DOCKER_BUILDX_VERSION=${DOCKER_BUILDX_VERSION} \ -f actions-runner-dind.${OS_IMAGE}.dockerfile \ -t ${DIND_RUNNER_NAME}:${OS_IMAGE} . ${DOCKER} build \ --build-arg TARGETPLATFORM=${TARGETPLATFORM} \ --build-arg RUNNER_VERSION=${RUNNER_VERSION} \ --build-arg DOCKER_VERSION=${DOCKER_VERSION} \ - --build-arg DOCKER_BUILDX_VERSION=${DOCKER_BUILDX_VERSION} \ -f actions-runner-dind-rootless.${OS_IMAGE}.dockerfile \ -t "${DIND_ROOTLESS_RUNNER_NAME}:${OS_IMAGE}" . 
@@ -79,7 +76,6 @@ docker-build-dind: check-target-platform --build-arg RUNNER_VERSION=${RUNNER_VERSION} \ --build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \ --build-arg DOCKER_VERSION=${DOCKER_VERSION} \ - --build-arg DOCKER_BUILDX_VERSION=${DOCKER_BUILDX_VERSION} \ -f actions-runner-dind.${OS_IMAGE}.dockerfile \ -t ${DIND_RUNNER_NAME}:${OS_IMAGE} . @@ -111,7 +107,6 @@ docker-buildx-set: --build-arg RUNNER_VERSION=${RUNNER_VERSION} \ --build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \ --build-arg DOCKER_VERSION=${DOCKER_VERSION} \ - --build-arg DOCKER_BUILDX_VERSION=${DOCKER_BUILDX_VERSION} \ -f actions-runner-dind.${OS_IMAGE}.dockerfile \ -t "${DIND_RUNNER_NAME}:${OS_IMAGE}" \ . ${PUSH_ARG} @@ -119,7 +114,6 @@ docker-buildx-set: --build-arg RUNNER_VERSION=${RUNNER_VERSION} \ --build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \ --build-arg DOCKER_VERSION=${DOCKER_VERSION} \ - --build-arg DOCKER_BUILDX_VERSION=${DOCKER_BUILDX_VERSION} \ -f actions-runner-dind-rootless.${OS_IMAGE}.dockerfile \ -t "${DIND_ROOTLESS_RUNNER_NAME}:${OS_IMAGE}" \ . ${PUSH_ARG} @@ -148,7 +142,6 @@ docker-buildx-dind: --build-arg RUNNER_VERSION=${RUNNER_VERSION} \ --build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \ --build-arg DOCKER_VERSION=${DOCKER_VERSION} \ - --build-arg DOCKER_BUILDX_VERSION=${DOCKER_BUILDX_VERSION} \ -f actions-runner-dind.${OS_IMAGE}.dockerfile \ -t "${DIND_RUNNER_NAME}:${OS_IMAGE}" \ . ${PUSH_ARG} @@ -163,7 +156,6 @@ docker-buildx-dind-rootless: --build-arg RUNNER_VERSION=${RUNNER_VERSION} \ --build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \ --build-arg DOCKER_VERSION=${DOCKER_VERSION} \ - --build-arg DOCKER_BUILDX_VERSION=${DOCKER_BUILDX_VERSION} \ -f actions-runner-dind-rootless.${OS_IMAGE}.dockerfile \ -t "${DIND_ROOTLESS_RUNNER_NAME}:${OS_IMAGE}" \ . 
${PUSH_ARG} diff --git a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile index 690c85e3bc..437f6b5098 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile @@ -5,7 +5,6 @@ ARG RUNNER_VERSION=2.299.1 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.2 # Docker and Docker Compose arguments ENV CHANNEL=stable -ARG DOCKER_BUILDX_VERSION=0.6.3 ARG DOCKER_COMPOSE_VERSION=v2.6.0 ARG DUMB_INIT_VERSION=1.2.5 @@ -135,15 +134,7 @@ USER runner # This will install docker under $HOME/bin according to the content of the script RUN export SKIP_IPTABLES=1 \ && curl -fsSL https://get.docker.com/rootless | sh \ - && /home/runner/bin/docker -v \ - && mkdir -p /home/runner/.docker/cli-plugins \ - && export BUILDX_ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ - && if ! curl -L -o /home/runner/.docker/cli-plugins/docker-buildx "https://github.com/docker/buildx/releases/download/v${DOCKER_BUILDX_VERSION}/buildx-v${DOCKER_BUILDX_VERSION}.linux-${BUILDX_ARCH}"; then \ - echo >&2 "error: failed to download docker-buildx v${DOCKER_VERSION}'"; \ - exit 1; \ - fi; \ - && chown -R runner:runner /home/runner/.docker; \ - && chmod +x /home/runner/.docker/cli-plugins/docker-buildx; + && /home/runner/bin/docker -v RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ diff --git a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile index fa316b6ac9..de63d3cc00 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile @@ -5,7 +5,6 @@ ARG RUNNER_VERSION=2.299.1 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.3 # Docker and Docker Compose arguments ENV CHANNEL=stable -ARG DOCKER_BUILDX_VERSION=0.6.3 ARG DOCKER_COMPOSE_VERSION=v2.12.2 ARG 
DUMB_INIT_VERSION=1.2.5 ARG RUNNER_USER_UID=1001 @@ -112,15 +111,7 @@ USER runner # This will install docker under $HOME/bin according to the content of the script RUN export SKIP_IPTABLES=1 \ && curl -fsSL https://get.docker.com/rootless | sh \ - && /home/runner/bin/docker -v \ - && mkdir -p /home/runner/.docker/cli-plugins \ - && export BUILDX_ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ - && if ! curl -L -o /home/runner/.docker/cli-plugins/docker-buildx "https://github.com/docker/buildx/releases/download/v${DOCKER_BUILDX_VERSION}/buildx-v${DOCKER_BUILDX_VERSION}.linux-${BUILDX_ARCH}"; then \ - echo >&2 "error: failed to download docker-buildx v${DOCKER_VERSION}'"; \ - exit 1; \ - fi; \ - && chown -R runner:runner /home/runner/.docker; \ - && chmod +x /home/runner/.docker/cli-plugins/docker-buildx; + && /home/runner/bin/docker -v RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ diff --git a/runner/actions-runner-dind.ubuntu-20.04.dockerfile b/runner/actions-runner-dind.ubuntu-20.04.dockerfile index b3b6495872..8e57d97b58 100644 --- a/runner/actions-runner-dind.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-20.04.dockerfile @@ -6,7 +6,6 @@ ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.2 # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.18 -ARG DOCKER_BUILDX_VERSION=0.6.3 ARG DOCKER_COMPOSE_VERSION=v2.6.0 ARG DUMB_INIT_VERSION=1.2.5 @@ -99,20 +98,10 @@ RUN set -vx; \ export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ && if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \ - && update-alternatives --set iptables /usr/sbin/iptables-legacy \ - && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy \ && curl -fLo docker.tgz https://download.docker.com/linux/static/${CHANNEL}/${ARCH}/docker-${DOCKER_VERSION}.tgz \ && tar zxvf docker.tgz \ && install -o 
root -g root -m 755 docker/* /usr/bin/ \ - && rm -rf docker docker.tgz \ - && mkdir -p /home/runner/.docker/cli-plugins \ - && export BUILDX_ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ - && if ! curl -L -o /home/runner/.docker/cli-plugins/docker-buildx "https://github.com/docker/buildx/releases/download/v${DOCKER_BUILDX_VERSION}/buildx-v${DOCKER_BUILDX_VERSION}.linux-${BUILDX_ARCH}"; then \ - echo >&2 "error: failed to download docker-buildx v${DOCKER_VERSION}'"; \ - exit 1; \ - fi \ - && chown -R runner:docker /home/runner/.docker \ - && chmod +x /home/runner/.docker/cli-plugins/docker-buildx + && rm -rf docker docker.tgz RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ diff --git a/runner/actions-runner-dind.ubuntu-22.04.dockerfile b/runner/actions-runner-dind.ubuntu-22.04.dockerfile index 6db4c4894d..39bd2422a0 100644 --- a/runner/actions-runner-dind.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-22.04.dockerfile @@ -6,7 +6,6 @@ ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.3 # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.21 -ARG DOCKER_BUILDX_VERSION=0.6.3 ARG DOCKER_COMPOSE_VERSION=v2.12.2 ARG DUMB_INIT_VERSION=1.2.5 ARG RUNNER_USER_UID=1001 @@ -18,18 +17,15 @@ RUN apt-get update -y \ && add-apt-repository -y ppa:git-core/ppa \ && apt-get update -y \ && apt-get install -y --no-install-recommends \ - build-essential \ curl \ ca-certificates \ git \ git-lfs \ iptables \ jq \ - make \ software-properties-common \ sudo \ unzip \ - wget \ zip \ && rm -rf /var/lib/apt/lists/* @@ -76,22 +72,12 @@ RUN cd "$RUNNER_ASSETS_DIR" \ RUN set -vx; \ export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ - export BUILDX_ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ && if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \ - && update-alternatives --set iptables 
/usr/sbin/iptables-legacy \ - && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy \ && curl -fLo docker.tgz https://download.docker.com/linux/static/${CHANNEL}/${ARCH}/docker-${DOCKER_VERSION}.tgz \ && tar zxvf docker.tgz \ && install -o root -g root -m 755 docker/* /usr/bin/ \ - && rm -rf docker docker.tgz \ - && mkdir -p /home/runner/.docker/cli-plugins \ - && if ! curl -L -o /home/runner/.docker/cli-plugins/docker-buildx "https://github.com/docker/buildx/releases/download/v${DOCKER_BUILDX_VERSION}/buildx-v${DOCKER_BUILDX_VERSION}.linux-${BUILDX_ARCH}"; then \ - echo >&2 "error: failed to download docker-buildx v${DOCKER_VERSION}'"; \ - exit 1; \ - fi \ - && chown -R runner:docker /home/runner/.docker \ - && chmod +x /home/runner/.docker/cli-plugins/docker-buildx + && rm -rf docker docker.tgz RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ From b1a7582d1aec93007b59ceee27c1a752c672b558 Mon Sep 17 00:00:00 2001 From: Siara <108543037+SiaraMist@users.noreply.github.com> Date: Thu, 5 Jan 2023 01:47:52 -0800 Subject: [PATCH 002/561] Restructure documentation (#2114) Breaks up the ARC documentation into several smaller articles. `@vijay-train` and `@martin389` put together the plan for this update, and I've just followed it here. In these updates: - The README has been updated to include more general project information, and link to each new article. - The `detailed-docs.md` file has been broken up into multiple articles, and then deleted. - The Actions Runner Controller Overview doc has been renamed to `about-arc.md`. Any edits to content beyond generally renaming headers or fixing typos is out of scope for this PR, but will be made in the future. 
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- README.md | 168 +- ...er-Controller-Overview.md => about-arc.md} | 56 + docs/authenticating-to-the-github-api.md | 208 ++ docs/automatically-scaling-runners.md | 717 ++++++ docs/choosing-runner-destination.md | 91 + docs/configuring-windows-runners.md | 111 + docs/deploying-alternative-runners.md | 62 + docs/deploying-arc-runners.md | 161 ++ docs/detailed-docs.md | 1940 ----------------- docs/installing-arc.md | 26 + docs/managing-access-with-runner-groups.md | 32 + docs/monitoring-and-troubleshooting.md | 30 + docs/quickstart.md | 151 ++ docs/using-arc-across-organizations.md | 61 + docs/using-arc-runners-in-a-workflow.md | 40 + docs/using-custom-volumes.md | 205 ++ docs/using-entrypoint-features.md | 69 + 17 files changed, 2058 insertions(+), 2070 deletions(-) rename docs/{Actions-Runner-Controller-Overview.md => about-arc.md} (75%) create mode 100644 docs/authenticating-to-the-github-api.md create mode 100644 docs/automatically-scaling-runners.md create mode 100644 docs/choosing-runner-destination.md create mode 100644 docs/configuring-windows-runners.md create mode 100644 docs/deploying-alternative-runners.md create mode 100644 docs/deploying-arc-runners.md delete mode 100644 docs/detailed-docs.md create mode 100644 docs/installing-arc.md create mode 100644 docs/managing-access-with-runner-groups.md create mode 100644 docs/monitoring-and-troubleshooting.md create mode 100644 docs/quickstart.md create mode 100644 docs/using-arc-across-organizations.md create mode 100644 docs/using-arc-runners-in-a-workflow.md create mode 100644 docs/using-custom-volumes.md create mode 100644 docs/using-entrypoint-features.md diff --git a/README.md b/README.md index 3f14219354..dd14c3a56b 100644 --- a/README.md +++ b/README.md @@ -1,153 +1,61 @@ - # Actions Runner Controller (ARC) [![CII Best 
Practices](https://bestpractices.coreinfrastructure.org/projects/6061/badge)](https://bestpractices.coreinfrastructure.org/projects/6061) [![awesome-runners](https://img.shields.io/badge/listed%20on-awesome--runners-blue.svg)](https://github.com/jonico/awesome-runners) [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/actions-runner-controller)](https://artifacthub.io/packages/search?repo=actions-runner-controller) -GitHub Actions automates the deployment of code to different environments, including production. The environments contain the `GitHub Runner` software which executes the automation. `GitHub Runner` can be run in GitHub-hosted cloud or self-hosted environments. Self-hosted environments offer more control of hardware, operating system, and software tools. They can be run on physical machines, virtual machines, or in a container. Containerized environments are lightweight, loosely coupled, highly efficient and can be managed centrally. However, they are not straightforward to use. - -`Actions Runner Controller (ARC)` makes it simpler to run self hosted environments on Kubernetes(K8s) cluster. - -With ARC you can : - -- **Deploy self hosted runners on Kubernetes cluster** with a simple set of commands. -- **Auto scale runners** based on demand. -- **Setup across GitHub editions** including GitHub Enterprise editions and GitHub Enterprise Cloud. - -## Overview - -For an overview of ARC, please refer to "[ARC Overview](https://github.com/actions/actions-runner-controller/blob/master/docs/Actions-Runner-Controller-Overview.md)." - - - -## Getting Started - -ARC can be setup with just a few steps. - -In this section we will setup prerequisites, deploy ARC into a K8s cluster, and then run GitHub Action workflows on that cluster. - -### Prerequisites - -
Create a K8s cluster, if not available. - -If you don't have a K8s cluster, you can install a local environment using minikube. For more information, see "Installing minikube." - -
- -:one: Install cert-manager in your cluster. For more information, see "[cert-manager](https://cert-manager.io/docs/installation/)." - -```shell -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.8.2/cert-manager.yaml -``` - - *note:- This command uses v1.8.2. Please replace with a later version, if available. - ->You may also install cert-manager using Helm. For instructions, see "[Installing with Helm](https://cert-manager.io/docs/installation/helm/#installing-with-helm)." - -:two: Next, Generate a Personal Access Token (PAT) for ARC to authenticate with GitHub. +## People -- Login to your GitHub account and Navigate to "[Create new Token](https://github.com/settings/tokens/new)." -- Select **repo**. -- Click **Generate Token** and then copy the token locally ( we’ll need it later). +`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions), mostly in their spare time. -### Deploy and Configure ARC +If you think the project is awesome and it's becoming a basis for your important business, consider [sponsoring us](https://github.com/sponsors/actions-runner-controller)! -1️⃣ Deploy and configure ARC on your K8s cluster. You may use Helm or Kubectl. +In case you are already the employer of one of contributors, sponsoring via GitHub Sponsors might not be an option. Just support them in other means! -
Helm deployment +We don't currently have [any sponsors dedicated to this project yet](https://github.com/sponsors/actions-runner-controller). -##### Add repository +However, [HelloFresh](https://www.hellofreshgroup.com/en/) has recently started sponsoring @mumoshu for this project along with his other works. A part of their sponsorship will enable @mumoshu to add an E2E test to keep ARC even more reliable on AWS. Thank you for your sponsorship! -```shell -helm repo add actions-runner-controller https://actions-runner-controller.github.io/actions-runner-controller -``` +[](https://careers.hellofresh.com/) -##### Install Helm chart +## Status -```shell -helm upgrade --install --namespace actions-runner-system --create-namespace\ - --set=authSecret.create=true\ - --set=authSecret.github_token="REPLACE_YOUR_TOKEN_HERE"\ - --wait actions-runner-controller actions/actions-runner-controller -``` +Even though actions-runner-controller is used in production environments, it is still in its early stage of development, hence versioned 0.x. - *note:- Replace REPLACE_YOUR_TOKEN_HERE with your PAT that was generated previously. -
+actions-runner-controller complies to Semantic Versioning 2.0.0 in which v0.x means that there could be backward-incompatible changes for every release. -
Kubectl deployment +The documentation is kept inline with master@HEAD, we do our best to highlight any features that require a specific ARC version or higher however this is not always easily done due to there being many moving parts. Additionally, we actively do not retain compatibly with every GitHub Enterprise Server version nor every Kubernetes version so you will need to ensure you stay current within a reasonable timespan. -##### Deploy ARC +## About -```shell -kubectl apply -f \ -https://github.com/actions/actions-runner-controller/\ -releases/download/v0.22.0/actions-runner-controller.yaml -``` +[GitHub Actions](https://github.com/features/actions) is a very useful tool for automating development. GitHub Actions jobs are run in the cloud by default, but you may want to run your jobs in your environment. [Self-hosted runner](https://github.com/actions/runner) can be used for such use cases, but requires the provisioning and configuration of a virtual machine instance. Instead if you already have a Kubernetes cluster, it makes more sense to run the self-hosted runner on top of it. - *note:- Replace "v0.22.0" with the version you wish to deploy +**actions-runner-controller** makes that possible. Just create a *Runner* resource on your Kubernetes, and it will run and operate the self-hosted runner for the specified repository. Combined with Kubernetes RBAC, you can also build simple Self-hosted runners as a Service. -##### Configure Personal Access Token - -```shell -kubectl create secret generic controller-manager \ - -n actions-runner-system \ - --from-literal=github_token=REPLACE_YOUR_TOKEN_HERE -```` - - *note:- Replace REPLACE_YOUR_TOKEN_HERE with your PAT that was generated previously. - -
- -2️⃣ Create the GitHub self hosted runners and configure to run against your repository. - -Create a `runnerdeployment.yaml` file and copy the following YAML contents into it: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runnerdeploy -spec: - replicas: 1 - template: - spec: - repository: mumoshu/actions-runner-controller-ci -```` - *note:- Replace "mumoshu/actions-runner-controller-ci" with your repository name. - -Apply this file to your K8s cluster. -```shell -kubectl apply -f runnerdeployment.yaml -```` - -*🎉 We are done - now we should have self hosted runners running in K8s configured to your repository. 🎉* - -Next - lets verify our setup and execute some workflows. - -### Verify and Execute Workflows - -:one: Verify that your setup is successful: -```shell - -$ kubectl get runners -NAME REPOSITORY STATUS -example-runnerdeploy2475h595fr mumoshu/actions-runner-controller-ci Running - -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -example-runnerdeploy2475ht2qbr 2/2 Running 0 1m -```` - -Also, this runner has been registered directly to the specified repository, you can see it in repository settings. For more information, see "[Checking the status of a self-hosted runner - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/monitoring-and-troubleshooting-self-hosted-runners#checking-the-status-of-a-self-hosted-runner)." - -:two: You are ready to execute workflows against this self-hosted runner. For more information, see "[Using self-hosted runners in a workflow - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#using-self-hosted-runners-in-a-workflow)." - -There is also a quick start guide to get started on Actions, For more information, please refer to "[Quick start Guide to GitHub Actions](https://docs.github.com/en/actions/quickstart)." 
- -## Learn more - -For more detailed documentation, please refer to "[Detailed Documentation](https://github.com/actions/actions-runner-controller/blob/master/docs/detailed-docs.md)." +## Getting Started +To give ARC a try with just a handful of commands, Please refer to the [Quickstart guide](/docs/quickstart.md). + +For an overview of ARC, please refer to [ARC Overview](https://github.com/actions/actions-runner-controller/blob/master/docs/Actions-Runner-Controller-Overview.md) + +For more information, please refer to detailed documentation below! + +## Documentation + +- [Quickstart guide](/docs/quickstart.md) +- [About ARC](/docs/about-arc.md) +- [Installing ARC](/docs/installing-arc.md) +- [Authenticating to the GitHub API](/docs/authenticating-to-the-github-api.md) +- [Deploying ARC runners](/docs/deploying-arc-runners.md) +- [Adding ARC runners to a repository, organization, or enterprise](/docs/choosing-runner-destination.md) +- [Automatically scaling runners](/docs/automatically-scaling-runners.md) +- [Using custom volumes](/docs/using-custom-volumes.md) +- [Using ARC runners in a workflow](/docs/using-arc-runners-in-a-workflow.md) +- [Managing access with runner groups](/docs/managing-access-with-runner-groups.md) +- [Configuring Windows runners](/docs/configuring-windows-runners.md) +- [Using ARC across organizations](/docs/using-arc-across-organizations.md) +- [Using entrypoint features](/docs/using-entrypoint-features.md) +- [Deploying alternative runners](/docs/deploying-alternative-runners.md) +- [Monitoring and troubleshooting](/docs/monitoring-and-troubleshooting.md) ## Contributing diff --git a/docs/Actions-Runner-Controller-Overview.md b/docs/about-arc.md similarity index 75% rename from docs/Actions-Runner-Controller-Overview.md rename to docs/about-arc.md index 563fd071f7..ce621d04bc 100644 --- a/docs/Actions-Runner-Controller-Overview.md +++ b/docs/about-arc.md @@ -1,3 +1,5 @@ +# About ARC + ## Introduction This document provides a high-level 
overview of Actions Runner Controller (ARC). ARC enables running Github Actions Runners on Kubernetes (K8s) clusters. @@ -131,3 +133,57 @@ ARC supports several different advanced configuration. - Webhook driven scaling. Please refer to the documentation in this repo for further details. + +## GitHub Enterprise Support + +The solution supports both GHEC (GitHub Enterprise Cloud) and GHES (GitHub Enterprise Server) editions as well as regular GitHub. Both PAT (personal access token) and GitHub App authentication works for installations that will be deploying either repository level and / or organization level runners. If you need to deploy enterprise level runners then you are restricted to PAT based authentication as GitHub doesn't support GitHub App based authentication for enterprise runners currently. + +If you are deploying this solution into a GHES environment then you will need to be running version >= [3.6.0](https://docs.github.com/en/enterprise-server@3.6/admin/release-notes). + +When deploying the solution for a GHES environment you need to provide an additional environment variable as part of the controller deployment: + +```shell +kubectl set env deploy controller-manager -c manager GITHUB_ENTERPRISE_URL= --namespace actions-runner-system +``` + +**_Note: The repository maintainers do not have an enterprise environment (cloud or server). Support for the enterprise specific feature set is community driven and on a best effort basis. PRs from the community are welcome to add features and maintain support._** + +## Software Installed in the Runner Image + +**Cloud Tooling**
+The project supports being deployed on the various cloud Kubernetes platforms (e.g. EKS), it does not however aim to go beyond that. No cloud specific tooling is bundled in the base runner, this is an active decision to keep the overhead of maintaining the solution manageable. + +**Bundled Software**
+The GitHub hosted runners include a large amount of pre-installed software packages. GitHub maintains a list in README files at . + +This solution maintains a few Ubuntu based runner images, these images do not contain all of the software installed on the GitHub runners. The images contain the following subset of packages from the GitHub runners: + +- Some basic CLI packages +- Git +- Git LFS +- Docker +- Docker Compose + +The virtual environments from GitHub contain a lot more software packages (different versions of Java, Node.js, Golang, .NET, etc) which are not provided in the runner image. Most of these have dedicated setup actions which allow the tools to be installed on-demand in a workflow, for example: `actions/setup-java` or `actions/setup-node` + +If there is a need to include packages in the runner image for which there is no setup action, then this can be achieved by building a custom container image for the runner. The easiest way is to start with the `summerwind/actions-runner` image and then install the extra dependencies directly in the docker image: + +```shell +FROM summerwind/actions-runner:latest + +RUN sudo apt-get update -y \ + && sudo apt-get install $YOUR_PACKAGES + && sudo rm -rf /var/lib/apt/lists/* +``` + +You can then configure the runner to use a custom docker image by configuring the `image` field of a `RunnerDeployment` or `RunnerSet`: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: custom-runner +spec: + repository: actions/actions-runner-controller + image: YOUR_CUSTOM_RUNNER_IMAGE +``` diff --git a/docs/authenticating-to-the-github-api.md b/docs/authenticating-to-the-github-api.md new file mode 100644 index 0000000000..9a5930ec04 --- /dev/null +++ b/docs/authenticating-to-the-github-api.md @@ -0,0 +1,208 @@ +# Authenticating to the GitHub API + +## Setting Up Authentication with GitHub API + +There are two ways for actions-runner-controller to authenticate with the GitHub API 
(only 1 can be configured at a time however): + +1. Using a GitHub App (not supported for enterprise level runners due to lack of support from GitHub) +2. Using a PAT + +Functionality wise, there isn't much of a difference between the 2 authentication methods. The primary benefit of authenticating via a GitHub App is an [increased API quota](https://docs.github.com/en/developers/apps/rate-limits-for-github-apps). + +If you are deploying the solution for a GHES environment you are able to [configure your rate limit settings](https://docs.github.com/en/enterprise-server@3.0/admin/configuration/configuring-rate-limits) making the main benefit irrelevant. If you're deploying the solution for a GHEC or regular GitHub environment and you run into rate limit issues, consider deploying the solution using the GitHub App authentication method instead. + +### Deploying Using GitHub App Authentication + +You can create a GitHub App for either your user account or any organization, below are the app permissions required for each supported type of runner: + +_Note: Links are provided further down to create an app for your logged in user account or an organization with the permissions for all runner types set in each link's query string_ + +**Required Permissions for Repository Runners:**
+**Repository Permissions** + +* Actions (read) +* Administration (read / write) +* Checks (read) (if you are going to use [Webhook Driven Scaling](#webhook-driven-scaling)) +* Metadata (read) + +**Required Permissions for Organization Runners:**
+**Repository Permissions** + +* Actions (read) +* Metadata (read) + +**Organization Permissions** + +* Self-hosted runners (read / write) + +_Note: All API routes mapped to their permissions can be found [here](https://docs.github.com/en/rest/reference/permissions-required-for-github-apps) if you wish to review_ + +**Subscribe to events** + +At this point you have a choice of configuring a webhook, a webhook is needed if you are going to use [webhook driven scaling](#webhook-driven-scaling). The webhook can be configured centrally in the GitHub app itself or separately. In either case you need to subscribe to the `Workflow Job` event. + +--- + +**Setup Steps** + +If you want to create a GitHub App for your account, open the following link to the creation page, enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page. + +- [Create GitHub Apps on your account](https://github.com/settings/apps/new?url=http://github.com/actions/actions-runner-controller&webhook_active=false&public=false&administration=write&actions=read) + +If you want to create a GitHub App for your organization, replace the `:org` part of the following URL with your organization name before opening it. Then enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page to create a GitHub App. + +- [Create GitHub Apps on your organization](https://github.com/organizations/:org/settings/apps/new?url=http://github.com/actions/actions-runner-controller&webhook_active=false&public=false&administration=write&organization_self_hosted_runners=write&actions=read&checks=read) + +You will see an *App ID* on the page of the GitHub App you created as follows, the value of this App ID will be used later. + +App ID + +Download the private key file by pushing the "Generate a private key" button at the bottom of the GitHub App page. This file will also be used later. 
+ +Generate a private key + +Go to the "Install App" tab on the left side of the page and install the GitHub App that you created for your account or organization. + +Install App + +When the installation is complete, you will be taken to a URL in one of the following formats, the last number of the URL will be used as the Installation ID later (For example, if the URL ends in `settings/installations/12345`, then the Installation ID is `12345`). + +- `https://github.com/settings/installations/${INSTALLATION_ID}` +- `https://github.com/organizations/eventreactor/settings/installations/${INSTALLATION_ID}` + + +Finally, register the App ID (`APP_ID`), Installation ID (`INSTALLATION_ID`), and the downloaded private key file (`PRIVATE_KEY_FILE_PATH`) to Kubernetes as a secret. + +**Kubectl Deployment:** + +```shell +$ kubectl create secret generic controller-manager \ + -n actions-runner-system \ + --from-literal=github_app_id=${APP_ID} \ + --from-literal=github_app_installation_id=${INSTALLATION_ID} \ + --from-file=github_app_private_key=${PRIVATE_KEY_FILE_PATH} +``` + +**Helm Deployment:** + +Configure your values.yaml, see the chart's [README](../charts/actions-runner-controller/README.md) for deploying the secret via Helm + +### Deploying Using PAT Authentication + +Personal Access Tokens can be used to register a self-hosted runner by *actions-runner-controller*. 
+Log in to a GitHub account that has `admin` privileges for the repository, and [create a personal access token](https://github.com/settings/tokens/new) with the appropriate scopes listed below:
The docs target OAuth apps and so are incomplete and may not be 100% accurate._ + +--- + +Once you have created the appropriate token, deploy it as a secret to your Kubernetes cluster that you are going to deploy the solution on: + +**Kubectl Deployment:** + +```shell +kubectl create secret generic controller-manager \ + -n actions-runner-system \ + --from-literal=github_token=${GITHUB_TOKEN} +``` + +**Helm Deployment:** + +Configure your values.yaml, see the chart's [README](../charts/actions-runner-controller/README.md) for deploying the secret via Helm + + +### Using without cert-manager + +There are two methods of deploying without cert-manager, you can generate your own certificates or rely on helm to generate a CA and certificate each time you update the chart. + +#### Using custom certificates + +Assuming you are installing in the default namespace, ensure your certificate has SANs: + +* `actions-runner-controller-webhook.actions-runner-system.svc` +* `actions-runner-controller-webhook.actions-runner-system.svc.cluster.local` + +It is possible to use a self-signed certificate by following a guide like +[this one](https://mariadb.com/docs/security/encryption/in-transit/create-self-signed-certificates-keys-openssl/) +using `openssl`. 
+ +Install your certificate as a TLS secret: + +```shell +$ kubectl create secret tls actions-runner-controller-serving-cert \ + -n actions-runner-system \ + --cert=path/to/cert/file \ + --key=path/to/key/file +``` + +Set the Helm chart values as follows: + +```shell +$ CA_BUNDLE=$(cat path/to/ca.pem | base64) +$ helm upgrade --install actions/actions-runner-controller \ + certManagerEnabled=false \ + admissionWebHooks.caBundle=${CA_BUNDLE} +``` + +#### Using helm to generate CA and certificates + +Set the Helm chart values as follows: + +```shell +$ helm upgrade --install actions/actions-runner-controller \ + certManagerEnabled=false +``` + +This generates a temporary CA using the helm `genCA` function and issues a certificate for the webhook. Note that this approach rotates the CA and certificate each time `helm install` or `helm upgrade` are run. In effect, this will cause short interruptions to the mutating webhook while the ARC pods stabilize and use the new certificate each time `helm upgrade` is called for the chart. The outage can affect kube-api activity due to the way mutating webhooks are called. + +### Using IRSA (IAM Roles for Service Accounts) in EKS + +> This feature requires controller version => [v0.15.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.15.0) + +Similar to regular pods and deployments, you firstly need an existing service account with the IAM role associated. +Create one using e.g. `eksctl`. You can refer to [the EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) for more details. + +Once you set up the service account, all you need is to add `serviceAccountName` and `fsGroup` to any pods that use the IAM-role enabled service account. + +`fsGroup` needs to be set to the UID of the `runner` Linux user that runs the runner agent (and dockerd in case you use dind-runner). 
For anyone using an Ubuntu 20.04 runner image it's `1000` and for Ubuntu 22.04 one it's `1001`. + +For `RunnerDeployment`, you can set those two fields under the runner spec at `RunnerDeployment.Spec.Template`: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runnerdeploy +spec: + template: + spec: + repository: USER/REO + serviceAccountName: my-service-account + securityContext: + # For Ubuntu 20.04 runner + fsGroup: 1000 + # Use 1001 for Ubuntu 22.04 runner + #fsGroup: 1001 +``` + diff --git a/docs/automatically-scaling-runners.md b/docs/automatically-scaling-runners.md new file mode 100644 index 0000000000..7c9c057a90 --- /dev/null +++ b/docs/automatically-scaling-runners.md @@ -0,0 +1,717 @@ +# Automatically scaling runners + +## Overview + +> If you are using controller version < [v0.22.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.22.0) and you are not using GHES, and so you can't set your rate limit budget, it is recommended that you use 100 replicas or fewer to prevent being rate limited. + +A `RunnerDeployment` or `RunnerSet` can scale the number of runners between `minReplicas` and `maxReplicas` fields driven by either pull based scaling metrics or via a webhook event. Whether the autoscaling is driven from a webhook event or pull based metrics it is implemented by backing a `RunnerDeployment` or `RunnerSet` kind with a `HorizontalRunnerAutoscaler` kind. + +**_Important!!! If you opt to configure autoscaling, ensure you remove the `replicas:` attribute in the `RunnerDeployment` / `RunnerSet` kinds that are configured for autoscaling [#206](https://github.com/actions/actions-runner-controller/issues/206#issuecomment-748601907)_** + +## Anti-Flapping Configuration + +For both pull driven or webhook driven scaling an anti-flapping implementation is included, by default a runner won't be scaled down within 10 minutes of it having been scaled up. 
+ +This anti-flap configuration also has the final say on if a runner can be scaled down or not regardless of the chosen scaling method. + +This delay is configurable via 2 methods: + +1. By setting a new default via the controller's `--default-scale-down-delay` flag +2. By setting by setting the attribute `scaleDownDelaySecondsAfterScaleOut:` in a `HorizontalRunnerAutoscaler` kind's `spec:`. + +Below is a complete basic example of one of the pull driven scaling metrics. + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runner-deployment +spec: + template: + spec: + repository: example/myrepo +--- +apiVersion: actions.summerwind.dev/v1alpha1 +kind: HorizontalRunnerAutoscaler +metadata: + name: example-runner-deployment-autoscaler +spec: + # Runners in the targeted RunnerDeployment won't be scaled down + # for 5 minutes instead of the default 10 minutes now + scaleDownDelaySecondsAfterScaleOut: 300 + scaleTargetRef: + kind: RunnerDeployment + # # In case the scale target is RunnerSet: + # kind: RunnerSet + name: example-runner-deployment + minReplicas: 1 + maxReplicas: 5 + metrics: + - type: PercentageRunnersBusy + scaleUpThreshold: '0.75' + scaleDownThreshold: '0.25' + scaleUpFactor: '2' + scaleDownFactor: '0.5' +``` + +## Pull Driven Scaling + +> To configure webhook driven scaling see the [Webhook Driven Scaling](#webhook-driven-scaling) section + +The pull based metrics are configured in the `metrics` attribute of a HRA (see snippet below). The period between polls is defined by the controller's `--sync-period` flag. If this flag isn't provided then the controller defaults to a sync period of `1m`, this can be configured in seconds or minutes. + +Be aware that the shorter the sync period the quicker you will consume your rate limit budget, depending on your environment this may or may not be a risk. 
Consider monitoring ARC's rate limit budget when configuring this feature to find the optimal sync period for performance.
+ +Example `RunnerDeployment` backed by a `HorizontalRunnerAutoscaler`: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runner-deployment +spec: + template: + spec: + repository: example/myrepo +--- +apiVersion: actions.summerwind.dev/v1alpha1 +kind: HorizontalRunnerAutoscaler +metadata: + name: example-runner-deployment-autoscaler +spec: + scaleTargetRef: + kind: RunnerDeployment + # # In case the scale target is RunnerSet: + # kind: RunnerSet + name: example-runner-deployment + minReplicas: 1 + maxReplicas: 5 + metrics: + - type: TotalNumberOfQueuedAndInProgressWorkflowRuns + repositoryNames: + # A repository name is the REPO part of `github.com/OWNER/REPO` + - myrepo +``` + +**PercentageRunnersBusy** + +The `HorizontalRunnerAutoscaler` will poll GitHub for the number of runners in the `busy` state which live in the RunnerDeployment's namespace, it will then scale depending on how you have configured the scale factors. + +**Benefits of this metric** +1. Supports named repositories server-side the same as the `TotalNumberOfQueuedAndInProgressWorkflowRuns` metric [#313](https://github.com/actions/actions-runner-controller/pull/313) +2. Supports GitHub organization wide scaling without maintaining an explicit list of repositories, this is especially useful for those that are working at a larger scale. [#223](https://github.com/actions/actions-runner-controller/pull/223) +3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [GitHub labels](#runner-labels) +4. Supports scaling desired runner count on both a percentage increase / decrease basis as well as on a fixed increase / decrease count basis [#223](https://github.com/actions/actions-runner-controller/pull/223) [#315](https://github.com/actions/actions-runner-controller/pull/315) + +**Drawbacks of this metric** +1. May not scale quickly enough for some users' needs. 
This metric is pull based and so the number of busy runners is polled as configured by the sync period, as a result scaling performance is bound by this sync period meaning there is a lag to scaling activity. +2. We are scaling up and down based on indicative information rather than a count of the actual number of queued jobs and so the desired runner count is likely to under provision new runners or overprovision them relative to actual job queue depth, this may or may not be a problem for you. + +Examples of each scaling type implemented with a `RunnerDeployment` backed by a `HorizontalRunnerAutoscaler`: + +```yaml +--- +apiVersion: actions.summerwind.dev/v1alpha1 +kind: HorizontalRunnerAutoscaler +metadata: + name: example-runner-deployment-autoscaler +spec: + scaleTargetRef: + kind: RunnerDeployment + # # In case the scale target is RunnerSet: + # kind: RunnerSet + name: example-runner-deployment + minReplicas: 1 + maxReplicas: 5 + metrics: + - type: PercentageRunnersBusy + scaleUpThreshold: '0.75' # The percentage of busy runners at which the number of desired runners are re-evaluated to scale up + scaleDownThreshold: '0.3' # The percentage of busy runners at which the number of desired runners are re-evaluated to scale down + scaleUpFactor: '1.4' # The scale up multiplier factor applied to desired count + scaleDownFactor: '0.7' # The scale down multiplier factor applied to desired count +``` + +```yaml +--- +apiVersion: actions.summerwind.dev/v1alpha1 +kind: HorizontalRunnerAutoscaler +metadata: + name: example-runner-deployment-autoscaler +spec: + scaleTargetRef: + kind: RunnerDeployment + # # In case the scale target is RunnerSet: + # kind: RunnerSet + name: example-runner-deployment + minReplicas: 1 + maxReplicas: 5 + metrics: + - type: PercentageRunnersBusy + scaleUpThreshold: '0.75' # The percentage of busy runners at which the number of desired runners are re-evaluated to scale up + scaleDownThreshold: '0.3' # The percentage of busy runners at which the 
number of desired runners are re-evaluated to scale down + scaleUpAdjustment: 2 # The scale up runner count added to desired count + scaleDownAdjustment: 1 # The scale down runner count subtracted from the desired count +``` + +## Webhook Driven Scaling + +> This feature requires controller version => [v0.20.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.20.0) + +> To configure pull driven scaling see the [Pull Driven Scaling](#pull-driven-scaling) section + +Alternatively ARC can be configured to scale based on the `workflow_job` webhook event. The primary benefit of autoscaling on webhooks compared to the pull driven scaling is that ARC is immediately notified of the scaling need. + +Webhooks are processed by a separate webhook server. The webhook server receives `workflow_job` webhook events and scales RunnerDeployments / RunnerSets by updating HRAs configured for the webhook trigger. Below is an example set-up where a HRA has been configured to scale a `RunnerDeployment` from a `workflow_job` event: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runners +spec: + template: + spec: + repository: example/myrepo +--- +apiVersion: actions.summerwind.dev/v1alpha1 +kind: HorizontalRunnerAutoscaler +metadata: + name: example-runners +spec: + minReplicas: 1 + maxReplicas: 10 + scaleTargetRef: + kind: RunnerDeployment + # # In case the scale target is RunnerSet: + # kind: RunnerSet + name: example-runners + scaleUpTriggers: + - githubEvent: + workflowJob: {} + duration: "30m" +``` + +The lifecycle of a runner provisioned from a webhook is different to a runner provisioned from the pull based scaling method: + +1. GitHub sends a `workflow_job` event to ARC with `status=queued` +2. ARC finds a HRA with a `workflow_job` webhook scale trigger that backs a RunnerDeployment / RunnerSet with matching runner labels +3. The matched HRA adds a unit to its `capacityReservations` list +4. 
ARC adds a replica and sets the EffectiveTime of that replica to current + `HRA.spec.scaleUpTriggers[].duration` + +At this point there are a few things that can happen, either the job gets allocated to the runner or the runner is left dangling due to it not being used, if the runner gets assigned the job that triggered the scale up the lifecycle looks like this: + +1. The new runner gets allocated the job and processes it +2. Upon the job ending GitHub sends another `workflow_job` event to ARC but with `status=completed` +3. The HRA removes the oldest capacity reservation from its `capacityReservations` and picks a runner to terminate ensuring it isn't busy via the GitHub API beforehand + +If the job is cancelled before it is allocated to a runner then the lifecycle looks like this: + +1. Upon the job cancellation GitHub sends another `workflow_job` event to ARC but with `status=cancelled` +2. The HRA removes the oldest capacity reservation from its `capacityReservations` and picks a runner to terminate ensuring it isn't busy via the GitHub API beforehand + +If runner is never used due to other runners matching needed runner group and required runner labels are allocated the job then the lifecycle looks like this: + +1. The scale trigger duration specified via `HRA.spec.scaleUpTriggers[].duration` elapses +2. The HRA thinks the capacity reservation is expired, removes it from HRA's `capacityReservations` and terminates the expired runner ensuring it isn't busy via the GitHub API beforehand + +Your `HRA.spec.scaleUpTriggers[].duration` value should be set long enough to account for the following things: + +1. the potential amount of time it could take for a pod to become `Running` e.g. you need to scale horizontally because there isn't a node avaliable +2. the amount of time it takes for GitHub to allocate a job to that runner +3. 
the amount of time it takes for the runner to notice the allocated job and start running it
+If you plan to expose ARC via Ingress, you might not be required to make it a `NodePort` service +(although nothing would prevent an ingress controller to expose NodePort services too): + +```console +$ helm upgrade --install --namespace actions-runner-system --create-namespace \ + --wait actions-runner-controller actions/actions-runner-controller \ + --set "githubWebhookServer.enabled=true" +``` + +The command above will create a new deployment and a service for receiving Github Webhooks on the `actions-runner-system` namespace. + +Now we need to expose this service so that GitHub can send these webhooks over the network with TLS protection. + +You can do it in any way you prefer, here we'll suggest doing it with a k8s Ingress. +For the sake of this example we'll expose this service on the following URL: + +- https://your.domain.com/actions-runner-controller-github-webhook-server + +Where `your.domain.com` should be replaced by your own domain. + +> Note: This step assumes you already have a configured `cert-manager` and domain name for your cluster. + +Let's start by creating an Ingress file called `arc-webhook-server.yaml` with the following contents: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: actions-runner-controller-github-webhook-server + namespace: actions-runner-system + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" +spec: + tls: + - hosts: + - your.domain.com + secretName: your-tls-secret-name + rules: + - http: + paths: + - path: /actions-runner-controller-github-webhook-server + pathType: Prefix + backend: + service: + name: actions-runner-controller-github-webhook-server + port: + number: 80 +``` + +Make sure to set the `spec.tls.secretName` to the name of your TLS secret and +`spec.tls.hosts[0]` to your own domain. 
+ +Then create this resource on your cluster with the following command: + +```bash +kubectl apply -n actions-runner-system -f arc-webhook-server.yaml +``` + +**Configuring GitHub for sending webhooks for our newly created webhook server:** + +After this step your webhook server should be ready to start receiving webhooks from GitHub. + +To configure GitHub to start sending you webhooks, go to the settings page of your repository +or organization then click on `Webhooks`, then on `Add webhook`. + +There set the "Payload URL" field with the webhook URL you just created, +if you followed the example ingress above the URL would be something like this: + +- https://your.domain.com/actions-runner-controller-github-webhook-server + +> Remember to replace `your.domain.com` with your own domain. + +Then click on "Content type" and choose `application/json`. + +Then click on "let me select individual events" and choose `Workflow Jobs`. + +Then click on `Add Webhook`. + +GitHub will then send a `ping` event to your webhook server to check if it is working, if it is you'll see a green V mark +alongside your webhook on the Settings -> Webhooks page. + +Once you were able to confirm that the Webhook server is ready and running from GitHub create or update your +`HorizontalRunnerAutoscaler` resources by learning the following configuration examples. + +### Install with Kustomize + +To install this feature using Kustomize, add `github-webhook-server` resources to your `kustomization.yaml` file as in the example below: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +# You should already have this +- github.com/actions/actions-runner-controller/config//default?ref=v0.22.2 +# Add the below! +- github.com/actions/actions-runner-controller/config//github-webhook-server?ref=v0.22.2 + +Finally, you will have to configure an ingress so that you may configure the webhook in github. 
An example of such an ingress can be found below:
+ +Webhook-based autoscaling is the best option as it is relatively easy to configure and also it can scale quickly. + +## Scheduled Overrides + +> This feature requires controller version => [v0.19.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.19.0) + +`Scheduled Overrides` allows you to configure `HorizontalRunnerAutoscaler` so that its `spec:` gets updated only during a certain period of time. This feature is usually used for the following scenarios: + +- You want to reduce your infrastructure costs by scaling your Kubernetes nodes down outside a given period +- You want to scale for scheduled spikes in workloads + +The most basic usage of this feature is to set a non-repeating override: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: HorizontalRunnerAutoscaler +metadata: + name: example-runner-deployment-autoscaler +spec: + scaleTargetRef: + kind: RunnerDeployment + # # In case the scale target is RunnerSet: + # kind: RunnerSet + name: example-runner-deployment + scheduledOverrides: + # Override minReplicas to 100 only between 2021-06-01T00:00:00+09:00 and 2021-06-03T00:00:00+09:00 + - startTime: "2021-06-01T00:00:00+09:00" + endTime: "2021-06-03T00:00:00+09:00" + minReplicas: 100 + minReplicas: 1 +``` + +A scheduled override without `recurrenceRule` is considered a one-off override, that is active between `startTime` and `endTime`. In the second scenario, it overrides `minReplicas` to `100` only between `2021-06-01T00:00:00+09:00` and `2021-06-03T00:00:00+09:00`. 
+ +A more advanced configuration is to include a `recurrenceRule` in the override: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: HorizontalRunnerAutoscaler +metadata: + name: example-runner-deployment-autoscaler +spec: + scaleTargetRef: + kind: RunnerDeployment + # # In case the scale target is RunnerSet: + # kind: RunnerSet + name: example-runner-deployment + scheduledOverrides: + # Override minReplicas to 0 only between 0am sat to 0am mon + - startTime: "2021-05-01T00:00:00+09:00" + endTime: "2021-05-03T00:00:00+09:00" + recurrenceRule: + frequency: Weekly + # Optional sunset datetime attribute + # untilTime: "2022-05-01T00:00:00+09:00" + minReplicas: 0 + minReplicas: 1 +``` + + A recurring override is initially active between `startTime` and `endTime`, and then it repeatedly gets activated after a certain period of time denoted by `frequency`. + +`frequecy` can take one of the following values: + +- `Daily` +- `Weekly` +- `Monthly` +- `Yearly` + +By default, a scheduled override repeats forever. If you want it to repeat until a specific point in time, define `untilTime`. The controller creates the last recurrence of the override until the recurrence's `startTime` is equal or earlier than `untilTime`. + +Do ensure that you have enough slack for `untilTime` so that a delayed or offline `actions-runner-controller` is much less likely to miss the last recurrence. For example, you might want to set `untilTime` to `M` minutes after the last recurrence's `startTime`, so that `actions-runner-controller` being offline up to `M` minutes doesn't miss the last recurrence. + +**Combining Multiple Scheduled Overrides**: + +In case you have a more complex scenario, try writing two or more entries under `scheduledOverrides`. + +The earlier entry is prioritized higher than later entries. So you usually define one-time overrides at the top of your list, then yearly, monthly, weekly, and lastly daily overrides. 
+ +A common use case for this may be to have 1 override to scale to 0 during the week outside of core business hours and another override to scale to 0 during all hours of the weekend. + +## Configuring automatic termination + +As of ARC 0.27.0 (unreleased as of 2022/09/30), runners can only wait for 15 seconds by default on pod termination. + +This can be problematic in two scenarios: + +- Scenario 1 - RunnerSet-only: You're triggering updates other than replica changes to `RunnerSet` very often- With current implementation, every update except `replicas` change to RunnerSet may result in terminating the in-progress workflow jobs to fail. +- Scenario 2 - RunnerDeployment and RunnerSet: You have another Kubernetes controller that evicts runner pods directly, not consulting ARC. + +> RunnerDeployment is not affected by the Scenario 1 as RunnerDeployment-managed runners are already tolerable to unlimitedly long in-progress running job while being replaced, as it's graceful termination process is handled outside of the entrypoint and the Kubernetes' pod termination process. + +To make it more reliable, please set `spec.template.spec.terminationGracePeriodSeconds` field and the `RUNNER_GRACEFUL_STOP_TIMEOUT` environment variable appropriately. + +If you want the pod to terminate in approximately 110 seconds at the latest since the termination request, try `terminationGracePeriodSeconds` of `110` and `RUNNER_GRACEFUL_STOP_TIMEOUT` of like `90`. + +The difference between `terminationGracePeriodSeconds` and `RUNNER_GRACEFUL_STOP_TIMEOUT` can vary depending on your environment and cluster. + +The idea is two fold: + +- `RUNNER_GRACEFUL_STOP_TIMEOUT` is for giving the runner the longest possible time to wait for the in-progress job to complete. You should keep this smaller than `terminationGracePeriodSeconds` so that you don't unnecessarily cancel running jobs. +- `terminationGracePeriodSeconds` is for giving the runner the longest possible time to stop before disappear. 
If the pod forcefully terminated before a graceful stop, the job running within the runner pod can hang like 10 minutes in the GitHub Actions Workflow Run/Job UI. A correct value for this avoids the hang, even though it had to cancel the running job due to the approaching deadline. + +> We know the default 15 seconds timeout is too short to be useful at all. +> In near future, we might raise the default to, for example, 100 seconds, so that runners that are tend to run up to 100 seconds jobs can +> terminate gracefully without failing running jobs. It will also allow the job which were running on the node that was requsted for termination +> to correct report its status as "cancelled", rather than hanging approximately 10 minutes in the Actions Web UI until it finally fails(without any specific error message). +> 100 seconds is just an example. It might be a good default in case you're using AWS EC2 Spot Instances because they tend to send +> termination notice two minutes before the termination. +> If you have any other suggestions for the default value, please share your thoughts in Discussions. + +## Additional Settings + +You can pass details through the spec selector. Here's an eg. of what you may like to do: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: actions-runner + namespace: default +spec: + replicas: 2 + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + spec: + priorityClassName: "high" + nodeSelector: + node-role.kubernetes.io/test: "" + + securityContext: + #All level/role/type/user values will vary based on your SELinux policies. 
+ #See https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html/container_security_guide/docker_selinux_security_policy for information about SELinux with containers + seLinuxOptions: + level: "s0" + role: "system_r" + type: "super_t" + user: "system_u" + + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/test + operator: Exists + + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + runner-deployment-name: actions-runner + + repository: mumoshu/actions-runner-controller-ci + # The default "summerwind/actions-runner" images are available at DockerHub: + # https://hub.docker.com/r/summerwind/actions-runner + # You can also build your own and specify it like the below: + image: custom-image/actions-runner:latest + imagePullPolicy: Always + resources: + limits: + cpu: "4.0" + memory: "8Gi" + requests: + cpu: "2.0" + memory: "4Gi" + # Timeout after a node crashed or became unreachable to evict your pods somewhere else (default 5mins) + tolerations: + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 10 + # true (default) = The runner restarts after running jobs, to ensure a clean and reproducible build environment + # false = The runner is persistent across jobs and doesn't automatically restart + # This directly controls the behaviour of `--once` flag provided to the github runner + ephemeral: false + # true (default) = A privileged docker sidecar container is included in the runner pod. + # false = A docker sidecar container is not included in the runner pod and you can't use docker. + # If set to false, there are no privileged container and you cannot use docker. + dockerEnabled: false + # Optional Docker containers network MTU + # If your network card MTU is smaller than Docker's default 1500, you might encounter Docker networking issues. 
+ # To fix these issues, you should setup Docker MTU smaller than or equal to that on the outgoing network card. + # More information: + # - https://mlohr.com/docker-mtu/ + dockerMTU: 1500 + # Optional Docker registry mirror + # Docker Hub has an aggressive rate-limit configuration for free plans. + # To avoid disruptions in your CI/CD pipelines, you might want to setup an external or on-premises Docker registry mirror. + # More information: + # - https://docs.docker.com/docker-hub/download-rate-limit/ + # - https://cloud.google.com/container-registry/docs/pulling-cached-images + dockerRegistryMirror: https://mirror.gcr.io/ + # false (default) = Docker support is provided by a sidecar container deployed in the runner pod. + # true = No docker sidecar container is deployed in the runner pod but docker can be used within the runner container instead. The image summerwind/actions-runner-dind is used by default. + dockerdWithinRunnerContainer: true + #Optional environment variables for docker container + # Valid only when dockerdWithinRunnerContainer=false + dockerEnv: + - name: HTTP_PROXY + value: http://example.com + # Docker sidecar container image tweaks examples below, only applicable if dockerdWithinRunnerContainer = false + dockerdContainerResources: + limits: + cpu: "4.0" + memory: "8Gi" + requests: + cpu: "2.0" + memory: "4Gi" + # Additional N number of sidecar containers + sidecarContainers: + - name: mysql + image: mysql:5.7 + env: + - name: MYSQL_ROOT_PASSWORD + value: abcd1234 + securityContext: + runAsUser: 0 + # workDir if not specified (default = /runner/_work) + # You can customise this setting allowing you to change the default working directory location + # for example, the below setting is the same as on the ubuntu-18.04 image + workDir: /home/runner/work + # You can mount some of the shared volumes to the dind container using dockerVolumeMounts, like any other volume mounting. 
+ # NOTE: in case you want to use an hostPath like the following example, make sure that Kubernetes doesn't schedule more than one runner + # per physical host. You can achieve that by setting pod anti-affinity rules and/or resource requests/limits. + volumes: + - name: docker-extra + hostPath: + path: /mnt/docker-extra + type: DirectoryOrCreate + - name: repo + hostPath: + path: /mnt/repo + type: DirectoryOrCreate + dockerVolumeMounts: + - mountPath: /var/lib/docker + name: docker-extra + # You can mount some of the shared volumes to the runner container using volumeMounts. + # NOTE: Do not try to mount the volume onto the runner workdir itself as it will not work. You could mount it however on a subdirectory in the runner workdir + # Please see https://github.com/actions/actions-runner-controller/issues/630#issuecomment-862087323 for more information. + volumeMounts: + - mountPath: /home/runner/work/repo + name: repo + # Optional storage medium type of runner volume mount. + # More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir + # "" (default) = Node's default medium + # Memory = RAM-backed filesystem (tmpfs) + # NOTE: Using RAM-backed filesystem gives you fastest possible storage on your host nodes. + volumeStorageMedium: "" + # Total amount of local storage resources required for runner volume mount. + # The default limit is undefined. + # NOTE: You can make sure that nodes' resources are never exceeded by limiting used storage size per runner pod. + # You can even disable the runner mount completely by setting limit to zero if dockerdWithinRunnerContainer = true. + # Please see https://github.com/actions/actions-runner-controller/pull/674 for more information. + volumeSizeLimit: 4Gi + # Optional name of the container runtime configuration that should be used for pods. + # This must match the name of a RuntimeClass resource available on the cluster. 
+ # More info: https://kubernetes.io/docs/concepts/containers/runtime-class + runtimeClassName: "runc" + # This is an advanced configuration. Don't touch it unless you know what you're doing. + containers: + - name: runner + # Usually, the runner container's privileged field is derived from dockerdWithinRunnerContainer. + # But in the case where you need to run privileged job steps even if you don't use docker/don't need dockerd within the runner container, + # just specify `privileged: true` like this. + # See https://github.com/actions/actions-runner-controller/issues/1282 + # Do note that specifying `privileged: false` while using dind is very likely to fail, even if you use some vm-based container runtimes + # like firecracker and kata. Basically they run containers within dedicated micro vms and so + # it's more like you can use `privileged: true` safer with those runtimes. + # + # privileged: true +``` + +### Status and future of this feature + +Note that this feature is currently intended for use with runner pods being terminated by other Kubernetes controllers and human operators, or those being replaced by the ARC RunnerSet controller due to spec change(s) except `replicas`. RunnerDeployment has no issue for this scenario. Non-dind runners are affected but this feature does not support those yet. + +For example, a runner pod can be terminated prematurely by cluster-autoscaler when it's about to terminate the node on cluster scale down. +All the variants of RunnerDeployment and RunnerSet managed runner pods, including runners with dockerd sidecars, rootless and rootful dind runners are affected by it. For dind runner pods only, you can use this feature to fix or alleviate the issue. + +To be clear, an increase/decrease in the desired replicas of RunnerDeployment and RunnerSet will never result in workflow jobs being terminated prematurely. +That's because it's handled BEFORE the runner pod is terminated, by the respective ARC controller. 
+ +For anyone interested in improving it, adding a dedicated pod finalizer for this issue will never work. +This is because a pod finalizer can't prevent SIGTERM from being sent when deletionTimestamp is updated to non-zero, +which triggers a Kubernetes pod termination process anyway. +What we want here is to delay the SIGTERM sent to the `actions/runner` process running within the runner container of the runner pod, +not to block the removal of the pod resource in the Kubernetes cluster. + +Also, handling all the graceful termination scenarios with a single method may or may not work. + +The most viable option would be to do the graceful termination handling entirely in the SIGTERM handler within the runner entrypoint. +But this may or may not work long-term, as it's subject to terminationGracePeriodSeconds anyway and the author of this note thinks there still is +no formally defined limit for terminationGracePeriodSeconds and hence we aren't sure how long terminationGracePeriodSeconds can be set in practice. +Also, I think the max workflow job duration is approximately 24h. So Kubernetes must formally support setting a terminationGracePeriodSeconds of 24h if +we are moving entirely to the entrypoint-based solution. +If you have any insights about the matter, chime in to the development of ARC! + +That's why we still rely on ARC's own graceful termination logic in the Runner controller for the spec change and replica increase/decrease of RunnerDeployment and +replica increase/decrease of RunnerSet, even though we now have the entrypoint-based graceful stop handler. + +Our plan is to improve the RunnerSet to have the same logic as the Runner controller so that you don't need this feature based on the SIGTERM handler for the spec change of RunnerSet. 
diff --git a/docs/choosing-runner-destination.md b/docs/choosing-runner-destination.md new file mode 100644 index 0000000000..3d06d0ce12 --- /dev/null +++ b/docs/choosing-runner-destination.md @@ -0,0 +1,91 @@ +# Adding ARC runners to a repository, organization, or enterprise + +## Usage + +[GitHub self-hosted runners can be deployed at various levels in a management hierarchy](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners#about-self-hosted-runners): +- The repository level +- The organization level +- The enterprise level + +Runners can be deployed as 1 of 2 abstractions: + +- A `RunnerDeployment` (similar to k8s's `Deployments`, based on `Pods`) +- A `RunnerSet` (based on k8s's `StatefulSets`) + +We go into details about the differences between the 2 later, initially lets look at how to deploy a basic `RunnerDeployment` at the 3 possible management hierarchies. + +## Adding runners to a repository + +To launch a single self-hosted runner, you need to create a manifest file that includes a `RunnerDeployment` resource as follows. This example launches a self-hosted runner with name *example-runnerdeploy* for the *actions/actions-runner-controller* repository. + +```yaml +# runnerdeployment.yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runnerdeploy +spec: + replicas: 1 + template: + spec: + repository: mumoshu/actions-runner-controller-ci +``` + +Apply the created manifest file to your Kubernetes. 
+ +```shell +$ kubectl apply -f runnerdeployment.yaml +runnerdeployment.actions.summerwind.dev/example-runnerdeploy created +``` + +You can see that 1 runner and its underlying pod has been created as specified by `replicas: 1` attribute: + +```shell +$ kubectl get runners +NAME REPOSITORY STATUS +example-runnerdeploy2475h595fr mumoshu/actions-runner-controller-ci Running + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +example-runnerdeploy2475ht2qbr 2/2 Running 0 1m +``` + +The runner you created has been registered directly to the defined repository, you should be able to see it in the settings of the repository. + +Now you can use your self-hosted runner. See the [official documentation](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/using-self-hosted-runners-in-a-workflow) on how to run a job with it. + +## Adding runners to an organization + +To add the runner to an organization, you only need to replace the `repository` field with `organization`, so the runner will register itself to the organization. + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runnerdeploy +spec: + replicas: 1 + template: + spec: + organization: your-organization-name +``` + +Now you can see the runner on the organization level (if you have organization owner permissions). + +## Adding runners to an enterprise + +To add the runner to an enterprise, you only need to replace the `repository` field with `enterprise`, so the runner will register itself to the enterprise. + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runnerdeploy +spec: + replicas: 1 + template: + spec: + enterprise: your-enterprise-name +``` + +Now you can see the runner on the enterprise level (if you have enterprise access permissions). 
diff --git a/docs/configuring-windows-runners.md b/docs/configuring-windows-runners.md new file mode 100644 index 0000000000..0eb1b3febd --- /dev/null +++ b/docs/configuring-windows-runners.md @@ -0,0 +1,111 @@ +# Configuring Windows runners + +## Setting up Windows Runners + +The main two steps in enabling Windows self-hosted runners are: + +- Using `nodeSelector`'s property to filter the `cert-manger` and `actions-runner-controller` pods +- Deploying a RunnerDeployment using a Windows-based image + +For the first step, you need to set the `nodeSelector.kubernetes.io/os` property in both the `cert-manager` and the `actions-runner-controller` deployments to `linux` so that the pods for these two deployments are only scheduled in Linux nodes. You can do this as follows: + +```yaml +nodeSelector: + kubernetes.io/os: linux +``` + +`cert-manager` has 4 different application within it the main application, the `webhook`, the `cainjector` and the `startupapicheck`. In the parameters or values file you use for the deployment you need to add the `nodeSelector` property four times, one for each application. + +For the `actions-runner-controller` you only have to use the `nodeSelector` only for the main deployment, so it only has to be set once. + +Once this is set up, you will need to deploy two different `RunnerDeployment`'s, one for Windows and one for Linux. +The Linux deployment can use either the default image or a custom one, however, there isn't a default Windows image so for Windows deployments you will have to build your own image. + +Below we share an example of the YAML used to create the deployment for each Operating System and a Dockerfile for the Windows deployment. + +
Windows +

+ +### RunnerDeployment + +```yaml +--- +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: k8s-runners-windows + namespace: actions-runner-system +spec: + template: + spec: + image: /: + dockerdWithinRunnerContainer: true + nodeSelector: + kubernetes.io/os: windows + kubernetes.io/arch: amd64 + repository: / + labels: + - windows + - X64 +``` + +### Dockerfile + +> Note that you'd need to patch the below Dockerfile if you need a graceful termination. +> See https://github.com/actions/actions-runner-controller/pull/1608/files#r917319574 for more information. + +```Dockerfile +FROM mcr.microsoft.com/windows/servercore:ltsc2019 + +WORKDIR /actions-runner + +SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop';$ProgressPreference='silentlyContinue';"] + +RUN Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.292.0/actions-runner-win-x64-2.292.0.zip -OutFile actions-runner-win-x64-2.292.0.zip + +RUN if((Get-FileHash -Path actions-runner-win-x64-2.292.0.zip -Algorithm SHA256).Hash.ToUpper() -ne 'f27dae1413263e43f7416d719e0baf338c8d80a366fed849ecf5fffcec1e941f'.ToUpper()){ throw 'Computed checksum did not match' } + +RUN Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('actions-runner-win-x64-2.292.0.zip', $PWD) + +RUN Invoke-WebRequest -Uri 'https://aka.ms/install-powershell.ps1' -OutFile install-powershell.ps1; ./install-powershell.ps1 -AddToPath + +RUN powershell Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + +RUN powershell choco install git.install --params "'/GitAndUnixToolsOnPath'" -y + +RUN powershell choco feature enable -n allowGlobalConfirmation + +CMD [ "pwsh", "-c", "./config.cmd --name $env:RUNNER_NAME 
--url https://github.com/$env:RUNNER_REPO --token $env:RUNNER_TOKEN --labels $env:RUNNER_LABELS --unattended --replace --ephemeral; ./run.cmd"] +``` +

+
+ + +
Linux +

+ +### RunnerDeployment + +```yaml +--- +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: k8s-runners-linux + namespace: actions-runner-system +spec: + template: + spec: + image: /: + nodeSelector: + kubernetes.io/os: linux + kubernetes.io/arch: amd64 + repository: : + labels: + - linux + - X64 +``` +

+
+ +After both `RunnerDeployment`'s are up and running, you can now proceed to deploy the `HorizontalRunnerAutoscaler` for each deployment. diff --git a/docs/deploying-alternative-runners.md b/docs/deploying-alternative-runners.md new file mode 100644 index 0000000000..1aef2dc250 --- /dev/null +++ b/docs/deploying-alternative-runners.md @@ -0,0 +1,62 @@ +# Deploying alternative runners + +## Alternative Runners + +ARC also offers a few alternative runner options + +### Runner with DinD + +When using the default runner, the runner pod starts up 2 containers: runner and DinD (Docker-in-Docker). ARC maintains an alternative all in one runner image with docker running in the same container as the runner. This may be prefered from a resource or complexity perspective or to be compliant with a `LimitRange` namespace configuration. + +```yaml +# dindrunnerdeployment.yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-dindrunnerdeploy +spec: + replicas: 1 + template: + spec: + image: summerwind/actions-runner-dind + dockerdWithinRunnerContainer: true + repository: mumoshu/actions-runner-controller-ci + env: [] +``` + +### Runner with rootless DinD + +When using the DinD runner, it assumes that the main runner is rootful, which can be problematic in a regulated or more security-conscious environment, such as co-tenanting across enterprise projects. The `actions-runner-dind-rootless` image runs rootless Docker inside the container as `runner` user. Note that this user does not have sudo access, so anything requiring admin privileges must be built into the runner's base image (like running `apt` to install additional software). + +### Runner with K8s Jobs + +When using the default runner, jobs that use a container will run in docker. 
This necessitates privileged mode, either on the runner pod or the sidecar container + +By setting the container mode, you can instead invoke these jobs using a [kubernetes implementation](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s) while not executing in privileged mode. + +The runner will dynamically spin up pods and k8s jobs in the runner's namespace to run the workflow, so a `workVolumeClaimTemplate` is required for the runner's working directory, and a service account with the [appropriate permissions.](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s#pre-requisites) + +There are some [limitations](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s#limitations) to this approach, mainly [job containers](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) are required on all workflows. + +```yaml +# runner.yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: Runner +metadata: + name: example-runner +spec: + repository: example/myrepo + containerMode: kubernetes + serviceAccountName: my-service-account + workVolumeClaimTemplate: + storageClassName: "my-dynamic-storage-class" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + env: [] +``` + + + \ No newline at end of file diff --git a/docs/deploying-arc-runners.md b/docs/deploying-arc-runners.md new file mode 100644 index 0000000000..9f83050ae1 --- /dev/null +++ b/docs/deploying-arc-runners.md @@ -0,0 +1,161 @@ +# Deploying ARC runners + +## Deploying runners with RunnerDeployments + +In our previous examples we were deploying a single runner via the `RunnerDeployment` kind, the amount of runners deployed can be statically set via the `replicas:` field, we can increase this value to deploy additional sets of runners instead: + +```yaml +# runnerdeployment.yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runnerdeploy +spec: + # This will 
deploy 2 runners now + replicas: 2 + template: + spec: + repository: mumoshu/actions-runner-controller-ci +``` + +Apply the manifest file to your cluster: + +```shell +$ kubectl apply -f runnerdeployment.yaml +runnerdeployment.actions.summerwind.dev/example-runnerdeploy created +``` + +You can see that 2 runners have been created as specified by `replicas: 2`: + +```shell +$ kubectl get runners +NAME REPOSITORY STATUS +example-runnerdeploy2475h595fr mumoshu/actions-runner-controller-ci Running +example-runnerdeploy2475ht2qbr mumoshu/actions-runner-controller-ci Running +``` + +## Deploying runners with RunnerSets + +> This feature requires controller version => [v0.20.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.20.0) + +We can also deploy sets of RunnerSets the same way, a basic `RunnerSet` would look like this: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerSet +metadata: + name: example +spec: + replicas: 1 + repository: mumoshu/actions-runner-controller-ci + # Other mandatory fields from StatefulSet + selector: + matchLabels: + app: example + serviceName: example + template: + metadata: + labels: + app: example +``` + +As it is based on `StatefulSet`, `selector` and `template.metadata.labels` it needs to be defined and have the exact same set of labels. `serviceName` must be set to some non-empty string as it is also required by `StatefulSet`. + +Runner-related fields like `ephemeral`, `repository`, `organization`, `enterprise`, and so on should be written directly under `spec`. + +Fields like `volumeClaimTemplates` that originates from `StatefulSet` should also be written directly under `spec`. + +Pod-related fields like security contexts and volumes are written under `spec.template.spec` like `StatefulSet`. + +Similarly, container-related fields like resource requests and limits, container image names and tags, security context, and so on are written under `spec.template.spec.containers`. 
There are two reserved container `name`, `runner` and `docker`. The former is for the container that runs [actions runner](https://github.com/actions/runner) and the latter is for the container that runs a `dockerd`. + +For a more complex example, see the below: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerSet +metadata: + name: example +spec: + replicas: 1 + repository: mumoshu/actions-runner-controller-ci + dockerdWithinRunnerContainer: true + template: + spec: + securityContext: + # All level/role/type/user values will vary based on your SELinux policies. + # See https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html/container_security_guide/docker_selinux_security_policy for information about SELinux with containers + seLinuxOptions: + level: "s0" + role: "system_r" + type: "super_t" + user: "system_u" + containers: + - name: runner + env: [] + resources: + limits: + cpu: "4.0" + memory: "8Gi" + requests: + cpu: "2.0" + memory: "4Gi" + # This is an advanced configuration. Don't touch it unless you know what you're doing. + securityContext: + # Usually, the runner container's privileged field is derived from dockerdWithinRunnerContainer. + # But in the case where you need to run privileged job steps even if you don't use docker/don't need dockerd within the runner container, + # just specified `privileged: true` like this. + # See https://github.com/actions/actions-runner-controller/issues/1282 + # Do note that specifying `privileged: false` while using dind is very likely to fail, even if you use some vm-based container runtimes + # like firecracker and kata. Basically they run containers within dedicated micro vms and so + # it's more like you can use `privileged: true` safer with those runtimes. 
+ # + # privileged: true + - name: docker + resources: + limits: + cpu: "4.0" + memory: "8Gi" + requests: + cpu: "2.0" + memory: "4Gi" +``` + +You can also read the design and usage documentation written in the original pull request that introduced `RunnerSet` for more information [#629](https://github.com/actions/actions-runner-controller/pull/629). + +Under the hood, `RunnerSet` relies on Kubernetes's `StatefulSet` and Mutating Webhook. A `statefulset` is used to create a number of pods that has stable names and dynamically provisioned persistent volumes, so that each `statefulset-managed` pod gets the same persistent volume even after restarting. A mutating webhook is used to dynamically inject a runner's "registration token" which is used to call GitHub's "Create Runner" API. + +## Using persistent runners + +Every runner managed by ARC is "ephemeral" by default. The life of an ephemeral runner managed by ARC looks like this- ARC creates a runner pod for the runner. As it's an ephemeral runner, the `--ephemeral` flag is passed to the `actions/runner` agent that runs within the `runner` container of the runner pod. + +`--ephemeral` is an `actions/runner` feature that instructs the runner to stop and de-register itself after the first job run. + +Once the ephemeral runner has completed running a workflow job, it stops with a status code of 0, hence the runner pod is marked as completed, removed by ARC. + +As it's removed after a workflow job run, the runner pod is never reused across multiple GitHub Actions workflow jobs, providing you a clean environment per each workflow job. + +Although not generally recommended, it's possible to disable the passing of the `--ephemeral` flag by explicitly setting `ephemeral: false` in the `RunnerDeployment` or `RunnerSet` spec. When disabled, your runner becomes "persistent". A persistent runner does not stop after workflow job ends, and in this mode `actions/runner` is known to clean only runner's work dir after each job. 
Whilst this can seem helpful it creates a non-deterministic environment which is not ideal for a CI/CD environment. Between runs, your actions cache, docker images stored in the `dind` and layer cache, globally installed packages etc are retained across multiple workflow job runs which can cause issues that are hard to debug and inconsistent. + +Persistent runners are available as an option for some edge cases however they are not preferred as they can create challenges around providing a deterministic and secure environment. + +## Deploying Multiple Controllers + +> This feature requires controller version => [v0.18.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.18.0) + +**_Note: Be aware when using this feature that CRDs are cluster-wide and so you should upgrade all of your controllers (and your CRDs) at the same time if you are doing an upgrade. Do not mix and match CRD versions with different controller versions. Doing so risks out of control scaling._** + +By default the controller will look for runners in all namespaces, the watch namespace feature allows you to restrict the controller to monitoring a single namespace. This then lets you deploy multiple controllers in a single cluster. You may want to do this either because you wish to scale beyond the API rate limit of a single PAT / GitHub App configuration or you wish to support multiple GitHub organizations with runners installed at the organization level in a single cluster. + +This feature is configured via the controller's `--watch-namespace` flag. When a namespace is provided via this flag, the controller will only monitor runners in that namespace. + +You can deploy multiple controllers either in a single shared namespace, or in a unique namespace per controller. + +If you plan on installing all instances of the controller stack into a single namespace there are a few things you need to do for this to work. + +1. 
All resources per stack must have a unique name, in the case of Helm this can be done by giving each install a unique release name, or via the `fullnameOverride` properties. +2. `authSecret.name` needs to be unique per stack when each stack is tied to runners in different GitHub organizations and repositories AND you want your GitHub credentials to be narrowly scoped. +3. `leaderElectionId` needs to be unique per stack. If this is not unique to the stack the controller tries to race onto the leader election lock resulting in only one stack working concurrently. Your controller will be stuck with a log message something like this `attempting to acquire leader lease arc-controllers/actions-runner-controller...` +4. The MutatingWebhookConfiguration in each stack must include a namespace selector for that stack's corresponding runner namespace, this is already configured in the helm chart. + +Alternatively, you can install each controller stack into a unique namespace (relative to other controller stacks in the cluster). Implementing ARC this way avoids the first, second and third pitfalls (you still need to set the corresponding namespace selector for each stack's mutating webhook) diff --git a/docs/detailed-docs.md b/docs/detailed-docs.md deleted file mode 100644 index a8b78491f7..0000000000 --- a/docs/detailed-docs.md +++ /dev/null @@ -1,1940 +0,0 @@ -# actions-runner-controller (ARC) - -[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/6061/badge)](https://bestpractices.coreinfrastructure.org/projects/6061) -[![awesome-runners](https://img.shields.io/badge/listed%20on-awesome--runners-blue.svg)](https://github.com/jonico/awesome-runners) -[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/actions-runner-controller)](https://artifacthub.io/packages/search?repo=actions-runner-controller) - -This controller operates self-hosted runners for GitHub Actions on your Kubernetes cluster. 
- -ToC: - -- [People](#people) -- [Status](#status) -- [About](#about) -- [Getting Started](#getting-started) -- [Installation](#installation) - - [GitHub Enterprise Support](#github-enterprise-support) -- [Setting Up Authentication with GitHub API](#setting-up-authentication-with-github-api) - - [Deploying Using GitHub App Authentication](#deploying-using-github-app-authentication) - - [Deploying Using PAT Authentication](#deploying-using-pat-authentication) -- [Deploying Multiple Controllers](#deploying-multiple-controllers) -- [Usage](#usage) - - [Repository Runners](#repository-runners) - - [Organization Runners](#organization-runners) - - [Enterprise Runners](#enterprise-runners) - - [RunnerDeployments](#runnerdeployments) - - [RunnerSets](#runnersets) - - [Persistent Runners](#persistent-runners) - - [Autoscaling](#autoscaling) - - [Anti-Flapping Configuration](#anti-flapping-configuration) - - [Pull Driven Scaling](#pull-driven-scaling) - - [Webhook Driven Scaling](#webhook-driven-scaling) - - [Autoscaling to/from 0](#autoscaling-tofrom-0) - - [Scheduled Overrides](#scheduled-overrides) - - [Alternative Runners](#alternative-runners) - - [Runner with DinD](#runner-with-dind) - - [Runner with rootless DinD](#runner-with-rootless-dind) - - [Runner with k8s jobs](#runner-with-k8s-jobs) - - [Additional Tweaks](#additional-tweaks) - - [Runner Graceful Termination](#runner-graceful-termination) - - [Custom Volume mounts](#custom-volume-mounts) - - [Runner Labels](#runner-labels) - - [Runner Groups](#runner-groups) - - [Runner Entrypoint Features](#runner-entrypoint-features) - - [Using IRSA (IAM Roles for Service Accounts) in EKS](#using-irsa-iam-roles-for-service-accounts-in-eks) - - [Software Installed in the Runner Image](#software-installed-in-the-runner-image) - - [Using without cert-manager](#using-without-cert-manager) - - [Windows Runners](#setting-up-windows-runners) - - [Multitenancy](#multitenancy) - - [Metrics](#metrics) -- 
[Troubleshooting](#troubleshooting) -- [Contributing](#contributing) - - -## People - -`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions), mostly in their spare time. - -If you think the project is awesome and it's becoming a basis for your important business, consider [sponsoring us](https://github.com/sponsors/actions-runner-controller)! - -In case you are already the employer of one of the contributors, sponsoring via GitHub Sponsors might not be an option. Just support them by other means! - -We don't currently have [any sponsors dedicated to this project yet](https://github.com/sponsors/actions-runner-controller). - -However, [HelloFresh](https://www.hellofreshgroup.com/en/) has recently started sponsoring @mumoshu for this project along with his other works. A part of their sponsorship will enable @mumoshu to add an E2E test to keep ARC even more reliable on AWS. Thank you for your sponsorship! - -[](https://careers.hellofresh.com/) - -## Status - -Even though actions-runner-controller is used in production environments, it is still in its early stage of development, hence versioned 0.x. - -actions-runner-controller complies with Semantic Versioning 2.0.0 in which v0.x means that there could be backward-incompatible changes for every release. - -The documentation is kept in line with master@HEAD, we do our best to highlight any features that require a specific ARC version or higher however this is not always easily done due to there being many moving parts. Additionally, we actively do not retain compatibility with every GitHub Enterprise Server version nor every Kubernetes version so you will need to ensure you stay current within a reasonable timespan.
- -## About - -[GitHub Actions](https://github.com/features/actions) is a very useful tool for automating development. GitHub Actions jobs are run in the cloud by default, but you may want to run your jobs in your environment. [Self-hosted runner](https://github.com/actions/runner) can be used for such use cases, but requires the provisioning and configuration of a virtual machine instance. Instead if you already have a Kubernetes cluster, it makes more sense to run the self-hosted runner on top of it. - -**actions-runner-controller** makes that possible. Just create a *Runner* resource on your Kubernetes, and it will run and operate the self-hosted runner for the specified repository. Combined with Kubernetes RBAC, you can also build simple Self-hosted runners as a Service. - -## Getting Started -To give ARC a try with just a handful of commands, Please refer to [Quick start guide](/README.md#getting-started). - -For an overview of ARC, please refer to [ARC Overview](https://github.com/actions/actions-runner-controller/blob/master/docs/Actions-Runner-Controller-Overview.md) - -For more information, please refer to detailed documentation below! - - -## Installation - -By default, actions-runner-controller uses [cert-manager](https://cert-manager.io/docs/installation/kubernetes/) for certificate management of Admission Webhook. Make sure you have already installed cert-manager before you install. The installation instructions for the cert-manager can be found below. - -- [Installing cert-manager on Kubernetes](https://cert-manager.io/docs/installation/kubernetes/) - -After installing cert-manager, install the custom resource definitions and actions-runner-controller with `kubectl` or `helm`. This will create an actions-runner-system namespace in your Kubernetes and deploy the required resources. 
- -**Kubectl Deployment:** - -```shell -# REPLACE "v0.25.2" with the version you wish to deploy -kubectl create -f https://github.com/actions/actions-runner-controller/releases/download/v0.25.2/actions-runner-controller.yaml -``` - -**Helm Deployment:** - -Configure your values.yaml, see the chart's [README](../charts/actions-runner-controller/README.md) for the values documentation - -```shell -helm repo add actions-runner-controller https://actions-runner-controller.github.io/actions-runner-controller -helm upgrade --install --namespace actions-runner-system --create-namespace \ - --wait actions-runner-controller actions-runner-controller/actions-runner-controller -``` - -### GitHub Enterprise Support - -The solution supports both GHEC (GitHub Enterprise Cloud) and GHES (GitHub Enterprise Server) editions as well as regular GitHub. Both PAT (personal access token) and GitHub App authentication work for installations that will be deploying either repository level and / or organization level runners. If you need to deploy enterprise level runners then you are restricted to PAT based authentication as GitHub doesn't support GitHub App based authentication for enterprise runners currently. - -If you are deploying this solution into a GHES environment then you will need to be running version >= [3.6.0](https://docs.github.com/en/enterprise-server@3.6/admin/release-notes). - -When deploying the solution for a GHES environment you need to provide an additional environment variable as part of the controller deployment: - -```shell -kubectl set env deploy controller-manager -c manager GITHUB_ENTERPRISE_URL=<GHES URL> --namespace actions-runner-system -``` - -**_Note: The repository maintainers do not have an enterprise environment (cloud or server). Support for the enterprise specific feature set is community driven and on a best effort basis.
PRs from the community are welcome to add features and maintain support._** - -## Setting Up Authentication with GitHub API - -There are two ways for actions-runner-controller to authenticate with the GitHub API (only 1 can be configured at a time however): - -1. Using a GitHub App (not supported for enterprise level runners due to lack of support from GitHub) -2. Using a PAT - -Functionality wise, there isn't much of a difference between the 2 authentication methods. The primary benefit of authenticating via a GitHub App is an [increased API quota](https://docs.github.com/en/developers/apps/rate-limits-for-github-apps). - -If you are deploying the solution for a GHES environment you are able to [configure your rate limit settings](https://docs.github.com/en/enterprise-server@3.0/admin/configuration/configuring-rate-limits) making the main benefit irrelevant. If you're deploying the solution for a GHEC or regular GitHub environment and you run into rate limit issues, consider deploying the solution using the GitHub App authentication method instead. - -### Deploying Using GitHub App Authentication - -You can create a GitHub App for either your user account or any organization, below are the app permissions required for each supported type of runner: - -_Note: Links are provided further down to create an app for your logged in user account or an organization with the permissions for all runner types set in each link's query string_ - -**Required Permissions for Repository Runners:**
-**Repository Permissions** - -* Actions (read) -* Administration (read / write) -* Checks (read) (if you are going to use [Webhook Driven Scaling](#webhook-driven-scaling)) -* Metadata (read) - -**Required Permissions for Organization Runners:**
-**Repository Permissions** - -* Actions (read) -* Metadata (read) - -**Organization Permissions** - -* Self-hosted runners (read / write) - -_Note: All API routes mapped to their permissions can be found [here](https://docs.github.com/en/rest/reference/permissions-required-for-github-apps) if you wish to review_ - -**Subscribe to events** - -At this point you have a choice of configuring a webhook, a webhook is needed if you are going to use [webhook driven scaling](#webhook-driven-scaling). The webhook can be configured centrally in the GitHub app itself or separately. In either case you need to subscribe to the `Workflow Job` event. - ---- - -**Setup Steps** - -If you want to create a GitHub App for your account, open the following link to the creation page, enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page. - -- [Create GitHub Apps on your account](https://github.com/settings/apps/new?url=http://github.com/actions/actions-runner-controller&webhook_active=false&public=false&administration=write&actions=read) - -If you want to create a GitHub App for your organization, replace the `:org` part of the following URL with your organization name before opening it. Then enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page to create a GitHub App. - -- [Create GitHub Apps on your organization](https://github.com/organizations/:org/settings/apps/new?url=http://github.com/actions/actions-runner-controller&webhook_active=false&public=false&administration=write&organization_self_hosted_runners=write&actions=read&checks=read) - -You will see an *App ID* on the page of the GitHub App you created as follows, the value of this App ID will be used later. - -App ID - -Download the private key file by pushing the "Generate a private key" button at the bottom of the GitHub App page. This file will also be used later. 
- -Generate a private key - -Go to the "Install App" tab on the left side of the page and install the GitHub App that you created for your account or organization. - -Install App - -When the installation is complete, you will be taken to a URL in one of the following formats, the last number of the URL will be used as the Installation ID later (For example, if the URL ends in `settings/installations/12345`, then the Installation ID is `12345`). - -- `https://github.com/settings/installations/${INSTALLATION_ID}` -- `https://github.com/organizations/eventreactor/settings/installations/${INSTALLATION_ID}` - - -Finally, register the App ID (`APP_ID`), Installation ID (`INSTALLATION_ID`), and the downloaded private key file (`PRIVATE_KEY_FILE_PATH`) to Kubernetes as a secret. - -**Kubectl Deployment:** - -```shell -$ kubectl create secret generic controller-manager \ - -n actions-runner-system \ - --from-literal=github_app_id=${APP_ID} \ - --from-literal=github_app_installation_id=${INSTALLATION_ID} \ - --from-file=github_app_private_key=${PRIVATE_KEY_FILE_PATH} -``` - -**Helm Deployment:** - -Configure your values.yaml, see the chart's [README](../charts/actions-runner-controller/README.md) for deploying the secret via Helm - -### Deploying Using PAT Authentication - -Personal Access Tokens can be used to register a self-hosted runner by *actions-runner-controller*. 
- -Log-in to a GitHub account that has `admin` privileges for the repository, and [create a personal access token](https://github.com/settings/tokens/new) with the appropriate scopes listed below: - -**Required Scopes for Repository Runners** - -* repo (Full control) - -**Required Scopes for Organization Runners** - -* repo (Full control) -* admin:org (Full control) -* admin:public_key (read:public_key) -* admin:repo_hook (read:repo_hook) -* admin:org_hook (Full control) -* notifications (Full control) -* workflow (Full control) - -**Required Scopes for Enterprise Runners** - -* admin:enterprise (manage_runners:enterprise) - -_Note: When you deploy enterprise runners they will get access to organizations, however, access to the repositories themselves is **NOT** allowed by default. Each GitHub organization must allow enterprise runner groups to be used in repositories as an initial one-time configuration step, this only needs to be done once after which it is permanent for that runner group._ - -_Note: GitHub does not document exactly what permissions you get with each PAT scope beyond a vague description. The best documentation they provide on the topic can be found [here](https://docs.github.com/en/developers/apps/building-oauth-apps/scopes-for-oauth-apps) if you wish to review. 
The docs target OAuth apps and so are incomplete and may not be 100% accurate._ - ---- - -Once you have created the appropriate token, deploy it as a secret to your Kubernetes cluster that you are going to deploy the solution on: - -**Kubectl Deployment:** - -```shell -kubectl create secret generic controller-manager \ - -n actions-runner-system \ - --from-literal=github_token=${GITHUB_TOKEN} -``` - -**Helm Deployment:** - -Configure your values.yaml, see the chart's [README](../charts/actions-runner-controller/README.md) for deploying the secret via Helm - -### Deploying Multiple Controllers - -> This feature requires controller version => [v0.18.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.18.0) - -**_Note: Be aware when using this feature that CRDs are cluster-wide and so you should upgrade all of your controllers (and your CRDs) at the same time if you are doing an upgrade. Do not mix and match CRD versions with different controller versions. Doing so risks out of control scaling._** - -By default the controller will look for runners in all namespaces, the watch namespace feature allows you to restrict the controller to monitoring a single namespace. This then lets you deploy multiple controllers in a single cluster. You may want to do this either because you wish to scale beyond the API rate limit of a single PAT / GitHub App configuration or you wish to support multiple GitHub organizations with runners installed at the organization level in a single cluster. - -This feature is configured via the controller's `--watch-namespace` flag. When a namespace is provided via this flag, the controller will only monitor runners in that namespace. - -You can deploy multiple controllers either in a single shared namespace, or in a unique namespace per controller. - -If you plan on installing all instances of the controller stack into a single namespace there are a few things you need to do for this to work. - -1. 
All resources per stack must have a unique name, in the case of Helm this can be done by giving each install a unique release name, or via the `fullnameOverride` properties. -2. `authSecret.name` needs to be unique per stack when each stack is tied to runners in different GitHub organizations and repositories AND you want your GitHub credentials to be narrowly scoped. -3. `leaderElectionId` needs to be unique per stack. If this is not unique to the stack the controller tries to race onto the leader election lock resulting in only one stack working concurrently. Your controller will be stuck with a log message something like this `attempting to acquire leader lease arc-controllers/actions-runner-controller...` -4. The MutatingWebhookConfiguration in each stack must include a namespace selector for that stack's corresponding runner namespace, this is already configured in the helm chart. - -Alternatively, you can install each controller stack into a unique namespace (relative to other controller stacks in the cluster). Implementing ARC this way avoids the first, second and third pitfalls (you still need to set the corresponding namespace selector for each stack's mutating webhook) - -## Usage - -[GitHub self-hosted runners can be deployed at various levels in a management hierarchy](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners#about-self-hosted-runners): -- The repository level -- The organization level -- The enterprise level - -Runners can be deployed as 1 of 2 abstractions: - -- A `RunnerDeployment` (similar to k8s's `Deployments`, based on `Pods`) -- A `RunnerSet` (based on k8s's `StatefulSets`) - -We go into details about the differences between the 2 later, initially lets look at how to deploy a basic `RunnerDeployment` at the 3 possible management hierarchies. - -### Repository Runners - -To launch a single self-hosted runner, you need to create a manifest file that includes a `RunnerDeployment` resource as follows. 
This example launches a self-hosted runner with name *example-runnerdeploy* for the *actions/actions-runner-controller* repository. - -```yaml -# runnerdeployment.yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runnerdeploy -spec: - replicas: 1 - template: - spec: - repository: mumoshu/actions-runner-controller-ci -``` - -Apply the created manifest file to your Kubernetes. - -```shell -$ kubectl apply -f runnerdeployment.yaml -runnerdeployment.actions.summerwind.dev/example-runnerdeploy created -``` - -You can see that 1 runner and its underlying pod has been created as specified by `replicas: 1` attribute: - -```shell -$ kubectl get runners -NAME REPOSITORY STATUS -example-runnerdeploy2475h595fr mumoshu/actions-runner-controller-ci Running - -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -example-runnerdeploy2475ht2qbr 2/2 Running 0 1m -``` - -The runner you created has been registered directly to the defined repository, you should be able to see it in the settings of the repository. - -Now you can use your self-hosted runner. See the [official documentation](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/using-self-hosted-runners-in-a-workflow) on how to run a job with it. - -### Organization Runners - -To add the runner to an organization, you only need to replace the `repository` field with `organization`, so the runner will register itself to the organization. - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runnerdeploy -spec: - replicas: 1 - template: - spec: - organization: your-organization-name -``` - -Now you can see the runner on the organization level (if you have organization owner permissions). - -### Enterprise Runners - -To add the runner to an enterprise, you only need to replace the `repository` field with `enterprise`, so the runner will register itself to the enterprise. 
- -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runnerdeploy -spec: - replicas: 1 - template: - spec: - enterprise: your-enterprise-name -``` - -Now you can see the runner on the enterprise level (if you have enterprise access permissions). - -### RunnerDeployments - -In our previous examples we were deploying a single runner via the `RunnerDeployment` kind, the amount of runners deployed can be statically set via the `replicas:` field, we can increase this value to deploy additional sets of runners instead: - -```yaml -# runnerdeployment.yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runnerdeploy -spec: - # This will deploy 2 runners now - replicas: 2 - template: - spec: - repository: mumoshu/actions-runner-controller-ci -``` - -Apply the manifest file to your cluster: - -```shell -$ kubectl apply -f runnerdeployment.yaml -runnerdeployment.actions.summerwind.dev/example-runnerdeploy created -``` - -You can see that 2 runners have been created as specified by `replicas: 2`: - -```shell -$ kubectl get runners -NAME REPOSITORY STATUS -example-runnerdeploy2475h595fr mumoshu/actions-runner-controller-ci Running -example-runnerdeploy2475ht2qbr mumoshu/actions-runner-controller-ci Running -``` - -### RunnerSets - -> This feature requires controller version => [v0.20.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.20.0) - -We can also deploy sets of RunnerSets the same way, a basic `RunnerSet` would look like this: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerSet -metadata: - name: example -spec: - replicas: 1 - repository: mumoshu/actions-runner-controller-ci - # Other mandatory fields from StatefulSet - selector: - matchLabels: - app: example - serviceName: example - template: - metadata: - labels: - app: example -``` - -As it is based on `StatefulSet`, `selector` and `template.metadata.labels` it needs to be 
defined and have the exact same set of labels. `serviceName` must be set to some non-empty string as it is also required by `StatefulSet`. - -Runner-related fields like `ephemeral`, `repository`, `organization`, `enterprise`, and so on should be written directly under `spec`. - -Fields like `volumeClaimTemplates` that originates from `StatefulSet` should also be written directly under `spec`. - -Pod-related fields like security contexts and volumes are written under `spec.template.spec` like `StatefulSet`. - -Similarly, container-related fields like resource requests and limits, container image names and tags, security context, and so on are written under `spec.template.spec.containers`. There are two reserved container `name`, `runner` and `docker`. The former is for the container that runs [actions runner](https://github.com/actions/runner) and the latter is for the container that runs a `dockerd`. - -For a more complex example, see the below: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerSet -metadata: - name: example -spec: - replicas: 1 - repository: mumoshu/actions-runner-controller-ci - dockerdWithinRunnerContainer: true - template: - spec: - securityContext: - # All level/role/type/user values will vary based on your SELinux policies. - # See https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html/container_security_guide/docker_selinux_security_policy for information about SELinux with containers - seLinuxOptions: - level: "s0" - role: "system_r" - type: "super_t" - user: "system_u" - containers: - - name: runner - env: [] - resources: - limits: - cpu: "4.0" - memory: "8Gi" - requests: - cpu: "2.0" - memory: "4Gi" - # This is an advanced configuration. Don't touch it unless you know what you're doing. - securityContext: - # Usually, the runner container's privileged field is derived from dockerdWithinRunnerContainer. 
- # But in the case where you need to run privileged job steps even if you don't use docker/don't need dockerd within the runner container, - # just specified `privileged: true` like this. - # See https://github.com/actions/actions-runner-controller/issues/1282 - # Do note that specifying `privileged: false` while using dind is very likely to fail, even if you use some vm-based container runtimes - # like firecracker and kata. Basically they run containers within dedicated micro vms and so - # it's more like you can use `privileged: true` safer with those runtimes. - # - # privileged: true - - name: docker - resources: - limits: - cpu: "4.0" - memory: "8Gi" - requests: - cpu: "2.0" - memory: "4Gi" -``` - -You can also read the design and usage documentation written in the original pull request that introduced `RunnerSet` for more information [#629](https://github.com/actions/actions-runner-controller/pull/629). - -Under the hood, `RunnerSet` relies on Kubernetes's `StatefulSet` and Mutating Webhook. A `statefulset` is used to create a number of pods that has stable names and dynamically provisioned persistent volumes, so that each `statefulset-managed` pod gets the same persistent volume even after restarting. A mutating webhook is used to dynamically inject a runner's "registration token" which is used to call GitHub's "Create Runner" API. - -### Persistent Runners - -Every runner managed by ARC is "ephemeral" by default. The life of an ephemeral runner managed by ARC looks like this- ARC creates a runner pod for the runner. As it's an ephemeral runner, the `--ephemeral` flag is passed to the `actions/runner` agent that runs within the `runner` container of the runner pod. - -`--ephemeral` is an `actions/runner` feature that instructs the runner to stop and de-register itself after the first job run. - -Once the ephemeral runner has completed running a workflow job, it stops with a status code of 0, hence the runner pod is marked as completed, removed by ARC. 
- -As it's removed after a workflow job run, the runner pod is never reused across multiple GitHub Actions workflow jobs, providing you a clean environment per each workflow job. - -Although not generally recommended, it's possible to disable the passing of the `--ephemeral` flag by explicitly setting `ephemeral: false` in the `RunnerDeployment` or `RunnerSet` spec. When disabled, your runner becomes "persistent". A persistent runner does not stop after workflow job ends, and in this mode `actions/runner` is known to clean only runner's work dir after each job. Whilst this can seem helpful it creates a non-deterministic environment which is not ideal for a CI/CD environment. Between runs, your actions cache, docker images stored in the `dind` and layer cache, globally installed packages etc are retained across multiple workflow job runs which can cause issues that are hard to debug and inconsistent. - -Persistent runners are available as an option for some edge cases however they are not preferred as they can create challenges around providing a deterministic and secure environment. - -### Autoscaling - -> If you are using controller version < [v0.22.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.22.0) and you are not using GHES, and so you can't set your rate limit budget, it is recommended that you use 100 replicas or fewer to prevent being rate limited. - -A `RunnerDeployment` or `RunnerSet` can scale the number of runners between `minReplicas` and `maxReplicas` fields driven by either pull based scaling metrics or via a webhook event. Whether the autoscaling is driven from a webhook event or pull based metrics it is implemented by backing a `RunnerDeployment` or `RunnerSet` kind with a `HorizontalRunnerAutoscaler` kind. - -**_Important!!! 
If you opt to configure autoscaling, ensure you remove the `replicas:` attribute in the `RunnerDeployment` / `RunnerSet` kinds that are configured for autoscaling [#206](https://github.com/actions/actions-runner-controller/issues/206#issuecomment-748601907)_** - -#### Anti-Flapping Configuration - -For both pull driven or webhook driven scaling an anti-flapping implementation is included, by default a runner won't be scaled down within 10 minutes of it having been scaled up. - -This anti-flap configuration also has the final say on if a runner can be scaled down or not regardless of the chosen scaling method. - -This delay is configurable via 2 methods: - -1. By setting a new default via the controller's `--default-scale-down-delay` flag -2. By setting the attribute `scaleDownDelaySecondsAfterScaleOut:` in a `HorizontalRunnerAutoscaler` kind's `spec:`. - -Below is a complete basic example of one of the pull driven scaling metrics. - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runner-deployment -spec: - template: - spec: - repository: example/myrepo ---- -apiVersion: actions.summerwind.dev/v1alpha1 -kind: HorizontalRunnerAutoscaler -metadata: - name: example-runner-deployment-autoscaler -spec: - # Runners in the targeted RunnerDeployment won't be scaled down - # for 5 minutes instead of the default 10 minutes now - scaleDownDelaySecondsAfterScaleOut: 300 - scaleTargetRef: - kind: RunnerDeployment - # # In case the scale target is RunnerSet: - # kind: RunnerSet - name: example-runner-deployment - minReplicas: 1 - maxReplicas: 5 - metrics: - - type: PercentageRunnersBusy - scaleUpThreshold: '0.75' - scaleDownThreshold: '0.25' - scaleUpFactor: '2' - scaleDownFactor: '0.5' -``` - -#### Pull Driven Scaling - -> To configure webhook driven scaling see the [Webhook Driven Scaling](#webhook-driven-scaling) section - -The pull based metrics are configured in the `metrics` attribute of a HRA (see snippet
below). The period between polls is defined by the controller's `--sync-period` flag. If this flag isn't provided then the controller defaults to a sync period of `1m`, this can be configured in seconds or minutes. - -Be aware that the shorter the sync period the quicker you will consume your rate limit budget, depending on your environment this may or may not be a risk. Consider monitoring ARCs rate limit budget when configuring this feature to find the optimal performance sync period. - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: HorizontalRunnerAutoscaler -metadata: - name: example-runner-deployment-autoscaler -spec: - scaleTargetRef: - kind: RunnerDeployment - # # In case the scale target is RunnerSet: - # kind: RunnerSet - name: example-runner-deployment - minReplicas: 1 - maxReplicas: 5 - # Your chosen scaling metrics here - metrics: [] -``` - -**Metric Options:** - -**TotalNumberOfQueuedAndInProgressWorkflowRuns** - -The `TotalNumberOfQueuedAndInProgressWorkflowRuns` metric polls GitHub for all pending workflow runs against a given set of repositories. The metric will scale the runner count up to the total number of pending jobs at the sync time up to the `maxReplicas` configuration. - -**Benefits of this metric** -1. Supports named repositories allowing you to restrict the runner to a specified set of repositories server-side. -2. Scales the runner count based on the depth of the job queue meaning a 1:1 scaling of runners to queued jobs. -3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [GitHub labels](#runner-labels). - -**Drawbacks of this metric** -1. A list of repositories must be included within the scaling metric. Maintaining a list of repositories may not be viable in larger environments or self-serve environments. -2. May not scale quickly enough for some users' needs. 
This metric is pull based and so the queue depth is polled as configured by the sync period, as a result scaling performance is bound by this sync period meaning there is a lag to scaling activity. -3. Relatively large amounts of API requests are required to maintain this metric, you may run into API rate limit issues depending on the size of your environment and how aggressive your sync period configuration is. - -Example `RunnerDeployment` backed by a `HorizontalRunnerAutoscaler`: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runner-deployment -spec: - template: - spec: - repository: example/myrepo ---- -apiVersion: actions.summerwind.dev/v1alpha1 -kind: HorizontalRunnerAutoscaler -metadata: - name: example-runner-deployment-autoscaler -spec: - scaleTargetRef: - kind: RunnerDeployment - # # In case the scale target is RunnerSet: - # kind: RunnerSet - name: example-runner-deployment - minReplicas: 1 - maxReplicas: 5 - metrics: - - type: TotalNumberOfQueuedAndInProgressWorkflowRuns - repositoryNames: - # A repository name is the REPO part of `github.com/OWNER/REPO` - - myrepo -``` - -**PercentageRunnersBusy** - -The `HorizontalRunnerAutoscaler` will poll GitHub for the number of runners in the `busy` state which live in the RunnerDeployment's namespace, it will then scale depending on how you have configured the scale factors. - -**Benefits of this metric** -1. Supports named repositories server-side the same as the `TotalNumberOfQueuedAndInProgressWorkflowRuns` metric [#313](https://github.com/actions/actions-runner-controller/pull/313) -2. Supports GitHub organization wide scaling without maintaining an explicit list of repositories, this is especially useful for those that are working at a larger scale. [#223](https://github.com/actions/actions-runner-controller/pull/223) -3. 
Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [GitHub labels](#runner-labels) -4. Supports scaling desired runner count on both a percentage increase / decrease basis as well as on a fixed increase / decrease count basis [#223](https://github.com/actions/actions-runner-controller/pull/223) [#315](https://github.com/actions/actions-runner-controller/pull/315) - -**Drawbacks of this metric** -1. May not scale quickly enough for some users' needs. This metric is pull based and so the number of busy runners is polled as configured by the sync period, as a result scaling performance is bound by this sync period meaning there is a lag to scaling activity. -2. We are scaling up and down based on indicative information rather than a count of the actual number of queued jobs and so the desired runner count is likely to under-provision new runners or over-provision them relative to actual job queue depth, this may or may not be a problem for you.
- -Examples of each scaling type implemented with a `RunnerDeployment` backed by a `HorizontalRunnerAutoscaler`: - -```yaml ---- -apiVersion: actions.summerwind.dev/v1alpha1 -kind: HorizontalRunnerAutoscaler -metadata: - name: example-runner-deployment-autoscaler -spec: - scaleTargetRef: - kind: RunnerDeployment - # # In case the scale target is RunnerSet: - # kind: RunnerSet - name: example-runner-deployment - minReplicas: 1 - maxReplicas: 5 - metrics: - - type: PercentageRunnersBusy - scaleUpThreshold: '0.75' # The percentage of busy runners at which the number of desired runners are re-evaluated to scale up - scaleDownThreshold: '0.3' # The percentage of busy runners at which the number of desired runners are re-evaluated to scale down - scaleUpFactor: '1.4' # The scale up multiplier factor applied to desired count - scaleDownFactor: '0.7' # The scale down multiplier factor applied to desired count -``` - -```yaml ---- -apiVersion: actions.summerwind.dev/v1alpha1 -kind: HorizontalRunnerAutoscaler -metadata: - name: example-runner-deployment-autoscaler -spec: - scaleTargetRef: - kind: RunnerDeployment - # # In case the scale target is RunnerSet: - # kind: RunnerSet - name: example-runner-deployment - minReplicas: 1 - maxReplicas: 5 - metrics: - - type: PercentageRunnersBusy - scaleUpThreshold: '0.75' # The percentage of busy runners at which the number of desired runners are re-evaluated to scale up - scaleDownThreshold: '0.3' # The percentage of busy runners at which the number of desired runners are re-evaluated to scale down - scaleUpAdjustment: 2 # The scale up runner count added to desired count - scaleDownAdjustment: 1 # The scale down runner count subtracted from the desired count -``` - -#### Webhook Driven Scaling - -> This feature requires controller version => [v0.20.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.20.0) - -> To configure pull driven scaling see the [Pull Driven Scaling](#pull-driven-scaling) section - 
-Alternatively ARC can be configured to scale based on the `workflow_job` webhook event. The primary benefit of autoscaling on webhooks compared to the pull driven scaling is that ARC is immediately notified of the scaling need. - -Webhooks are processed by a separate webhook server. The webhook server receives `workflow_job` webhook events and scales RunnerDeployments / RunnerSets by updating HRAs configured for the webhook trigger. Below is an example set-up where a HRA has been configured to scale a `RunnerDeployment` from a `workflow_job` event: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runners -spec: - template: - spec: - repository: example/myrepo ---- -apiVersion: actions.summerwind.dev/v1alpha1 -kind: HorizontalRunnerAutoscaler -metadata: - name: example-runners -spec: - minReplicas: 1 - maxReplicas: 10 - scaleTargetRef: - kind: RunnerDeployment - # # In case the scale target is RunnerSet: - # kind: RunnerSet - name: example-runners - scaleUpTriggers: - - githubEvent: - workflowJob: {} - duration: "30m" -``` - -The lifecycle of a runner provisioned from a webhook is different to a runner provisioned from the pull based scaling method: - -1. GitHub sends a `workflow_job` event to ARC with `status=queued` -2. ARC finds a HRA with a `workflow_job` webhook scale trigger that backs a RunnerDeployment / RunnerSet with matching runner labels -3. The matched HRA adds a unit to its `capacityReservations` list -4. ARC adds a replica and sets the EffectiveTime of that replica to current + `HRA.spec.scaleUpTriggers[].duration` - -At this point there are a few things that can happen, either the job gets allocated to the runner or the runner is left dangling due to it not being used, if the runner gets assigned the job that triggered the scale up the lifecycle looks like this: - -1. The new runner gets allocated the job and processes it -2. 
Upon the job ending GitHub sends another `workflow_job` event to ARC but with `status=completed`
-3. The HRA removes the oldest capacity reservation from its `capacityReservations` and picks a runner to terminate ensuring it isn't busy via the GitHub API beforehand
-
-If the job is cancelled before it is allocated to a runner then the lifecycle looks like this:
-
-1. Upon the job cancellation GitHub sends another `workflow_job` event to ARC but with `status=cancelled`
-2. The HRA removes the oldest capacity reservation from its `capacityReservations` and picks a runner to terminate ensuring it isn't busy via the GitHub API beforehand
-
-If the runner is never used because other runners matching the needed runner group and required runner labels are allocated the job, then the lifecycle looks like this:
-
-1. The scale trigger duration specified via `HRA.spec.scaleUpTriggers[].duration` elapses
-2. The HRA thinks the capacity reservation is expired, removes it from HRA's `capacityReservations` and terminates the expired runner ensuring it isn't busy via the GitHub API beforehand
-
-Your `HRA.spec.scaleUpTriggers[].duration` value should be set long enough to account for the following things:
-
-1. the potential amount of time it could take for a pod to become `Running` e.g. you need to scale horizontally because there isn't a node available
-2. the amount of time it takes for GitHub to allocate a job to that runner
-3. the amount of time it takes for the runner to notice the allocated job and start running it
-
-##### Install with Helm
-
-To enable this feature, you first need to install the GitHub webhook server. 
To install via our Helm chart, -_[see the values documentation for all configuration options](../charts/actions-runner-controller/README.md)_ - -```console -$ helm upgrade --install --namespace actions-runner-system --create-namespace \ - --wait actions-runner-controller actions/actions-runner-controller \ - --set "githubWebhookServer.enabled=true,service.type=NodePort,githubWebhookServer.ports[0].nodePort=33080" -``` - -The above command will result in exposing the node port 33080 for Webhook events. -Usually, you need to create an external load balancer targeted to the node port, -and register the hostname or the IP address of the external load balancer to the GitHub Webhook. - -**With a custom Kubernetes ingress controller:** - -> **CAUTION:** The Kubernetes ingress controllers described below is just a suggestion from the community and -> the ARC team will not provide any user support for ingress controllers as it's not a part of this project. -> -> The following guide on creating an ingress has been contributed by the awesome ARC community and is provided here as-is. -> You may, however, still be able to ask for help on the community on GitHub Discussions if you have any problems. - -Kubernetes provides `Ingress` resources to let you configure your ingress controller to expose a Kubernetes service. -If you plan to expose ARC via Ingress, you might not be required to make it a `NodePort` service -(although nothing would prevent an ingress controller to expose NodePort services too): - -```console -$ helm upgrade --install --namespace actions-runner-system --create-namespace \ - --wait actions-runner-controller actions/actions-runner-controller \ - --set "githubWebhookServer.enabled=true" -``` - -The command above will create a new deployment and a service for receiving Github Webhooks on the `actions-runner-system` namespace. - -Now we need to expose this service so that GitHub can send these webhooks over the network with TLS protection. 
- -You can do it in any way you prefer, here we'll suggest doing it with a k8s Ingress. -For the sake of this example we'll expose this service on the following URL: - -- https://your.domain.com/actions-runner-controller-github-webhook-server - -Where `your.domain.com` should be replaced by your own domain. - -> Note: This step assumes you already have a configured `cert-manager` and domain name for your cluster. - -Let's start by creating an Ingress file called `arc-webhook-server.yaml` with the following contents: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: actions-runner-controller-github-webhook-server - namespace: actions-runner-system - annotations: - kubernetes.io/ingress.class: nginx - nginx.ingress.kubernetes.io/backend-protocol: "HTTP" -spec: - tls: - - hosts: - - your.domain.com - secretName: your-tls-secret-name - rules: - - http: - paths: - - path: /actions-runner-controller-github-webhook-server - pathType: Prefix - backend: - service: - name: actions-runner-controller-github-webhook-server - port: - number: 80 -``` - -Make sure to set the `spec.tls.secretName` to the name of your TLS secret and -`spec.tls.hosts[0]` to your own domain. - -Then create this resource on your cluster with the following command: - -```bash -kubectl apply -n actions-runner-system -f arc-webhook-server.yaml -``` - -**Configuring GitHub for sending webhooks for our newly created webhook server:** - -After this step your webhook server should be ready to start receiving webhooks from GitHub. - -To configure GitHub to start sending you webhooks, go to the settings page of your repository -or organization then click on `Webhooks`, then on `Add webhook`. 
-
-There set the "Payload URL" field with the webhook URL you just created,
-if you followed the example ingress above the URL would be something like this:
-
-- https://your.domain.com/actions-runner-controller-github-webhook-server
-
-> Remember to replace `your.domain.com` with your own domain.
-
-Then click on "Content type" and choose `application/json`.
-
-Then click on "let me select individual events" and choose `Workflow Jobs`.
-
-Then click on `Add Webhook`.
-
-GitHub will then send a `ping` event to your webhook server to check if it is working, if it is you'll see a green check mark
-alongside your webhook on the Settings -> Webhooks page.
-
-Once you were able to confirm that the Webhook server is ready and running, from GitHub create or update your
-`HorizontalRunnerAutoscaler` resources by referring to the following configuration examples.
-
-##### Install with Kustomize
-
-To install this feature using Kustomize, add `github-webhook-server` resources to your `kustomization.yaml` file as in the example below:
-
-```yaml
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-resources:
-# You should already have this
-- github.com/actions/actions-runner-controller/config//default?ref=v0.22.2
-# Add the below!
-- github.com/actions/actions-runner-controller/config//github-webhook-server?ref=v0.22.2
-```
-
-Finally, you will have to configure an ingress so that you may configure the webhook in GitHub. 
An example of such ingress can be find below: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: actions-runners-webhook-server -spec: - rules: - - http: - paths: - - path: / - backend: - service: - name: github-webhook-server - port: - number: 80 - pathType: Exact - -``` - -#### Autoscaling to/from 0 - -> This feature requires controller version => [v0.19.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.19.0) - -The regular `RunnerDeployment` / `RunnerSet` `replicas:` attribute as well as the `HorizontalRunnerAutoscaler` `minReplicas:` attribute supports being set to 0. - -The main use case for scaling from 0 is with the `HorizontalRunnerAutoscaler` kind. To scale from 0 whilst still being able to provision runners as jobs are queued we must use the `HorizontalRunnerAutoscaler` with only certain scaling configurations, only the below configurations support scaling from 0 whilst also being able to provision runners as jobs are queued: - -- `TotalNumberOfQueuedAndInProgressWorkflowRuns` -- `PercentageRunnersBusy` + `TotalNumberOfQueuedAndInProgressWorkflowRuns` -- Webhook-based autoscaling - -`PercentageRunnersBusy` can't be used alone for scale-from-zero as, by its definition, it needs one or more GitHub runners to become `busy` to be able to scale. If there isn't a runner to pick up a job and enter a `busy` state then the controller will never know to provision a runner to begin with as this metric has no knowledge of the job queue and is relying on using the number of busy runners as a means for calculating the desired replica count. - -If a HorizontalRunnerAutoscaler is configured with a secondary metric of `TotalNumberOfQueuedAndInProgressWorkflowRuns` then be aware that the controller will check the primary metric of `PercentageRunnersBusy` first and will only use the secondary metric to calculate the desired replica count if the primary metric returns 0 desired replicas. 
- -Webhook-based autoscaling is the best option as it is relatively easy to configure and also it can scale quickly. - -#### Scheduled Overrides - -> This feature requires controller version => [v0.19.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.19.0) - -`Scheduled Overrides` allows you to configure `HorizontalRunnerAutoscaler` so that its `spec:` gets updated only during a certain period of time. This feature is usually used for the following scenarios: - -- You want to reduce your infrastructure costs by scaling your Kubernetes nodes down outside a given period -- You want to scale for scheduled spikes in workloads - -The most basic usage of this feature is to set a non-repeating override: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: HorizontalRunnerAutoscaler -metadata: - name: example-runner-deployment-autoscaler -spec: - scaleTargetRef: - kind: RunnerDeployment - # # In case the scale target is RunnerSet: - # kind: RunnerSet - name: example-runner-deployment - scheduledOverrides: - # Override minReplicas to 100 only between 2021-06-01T00:00:00+09:00 and 2021-06-03T00:00:00+09:00 - - startTime: "2021-06-01T00:00:00+09:00" - endTime: "2021-06-03T00:00:00+09:00" - minReplicas: 100 - minReplicas: 1 -``` - -A scheduled override without `recurrenceRule` is considered a one-off override, that is active between `startTime` and `endTime`. In the second scenario, it overrides `minReplicas` to `100` only between `2021-06-01T00:00:00+09:00` and `2021-06-03T00:00:00+09:00`. 
-
-A more advanced configuration is to include a `recurrenceRule` in the override:
-
-```yaml
-apiVersion: actions.summerwind.dev/v1alpha1
-kind: HorizontalRunnerAutoscaler
-metadata:
-  name: example-runner-deployment-autoscaler
-spec:
-  scaleTargetRef:
-    kind: RunnerDeployment
-    # # In case the scale target is RunnerSet:
-    # kind: RunnerSet
-    name: example-runner-deployment
-  scheduledOverrides:
-  # Override minReplicas to 0 only between 0am sat to 0am mon
-  - startTime: "2021-05-01T00:00:00+09:00"
-    endTime: "2021-05-03T00:00:00+09:00"
-    recurrenceRule:
-      frequency: Weekly
-      # Optional sunset datetime attribute
-      # untilTime: "2022-05-01T00:00:00+09:00"
-    minReplicas: 0
-  minReplicas: 1
-```
-
-A recurring override is initially active between `startTime` and `endTime`, and then it repeatedly gets activated after a certain period of time denoted by `frequency`.
-
-`frequency` can take one of the following values:
-
-- `Daily`
-- `Weekly`
-- `Monthly`
-- `Yearly`
-
-By default, a scheduled override repeats forever. If you want it to repeat until a specific point in time, define `untilTime`. The controller creates the last recurrence of the override until the recurrence's `startTime` is equal to or earlier than `untilTime`.
-
-Do ensure that you have enough slack for `untilTime` so that a delayed or offline `actions-runner-controller` is much less likely to miss the last recurrence. For example, you might want to set `untilTime` to `M` minutes after the last recurrence's `startTime`, so that `actions-runner-controller` being offline up to `M` minutes doesn't miss the last recurrence.
-
-**Combining Multiple Scheduled Overrides**:
-
-In case you have a more complex scenario, try writing two or more entries under `scheduledOverrides`.
-
-The earlier entry is prioritized higher than later entries. So you usually define one-time overrides at the top of your list, then yearly, monthly, weekly, and lastly daily overrides. 
-
-A common use case for this may be to have 1 override to scale to 0 during the week outside of core business hours and another override to scale to 0 during all hours of the weekend.
-
-### Alternative Runners
-
-ARC also offers a few alternative runner options.
-
-#### Runner with DinD
-
-When using the default runner, the runner pod starts up 2 containers: runner and DinD (Docker-in-Docker). ARC maintains an alternative all in one runner image with docker running in the same container as the runner. This may be preferred from a resource or complexity perspective or to be compliant with a `LimitRange` namespace configuration.
-
-```yaml
-# dindrunnerdeployment.yaml
-apiVersion: actions.summerwind.dev/v1alpha1
-kind: RunnerDeployment
-metadata:
-  name: example-dindrunnerdeploy
-spec:
-  replicas: 1
-  template:
-    spec:
-      image: summerwind/actions-runner-dind
-      dockerdWithinRunnerContainer: true
-      repository: mumoshu/actions-runner-controller-ci
-      env: []
-```
-
-#### Runner with rootless DinD
-
-When using the DinD runner, it assumes that the main runner is rootful, which can be problematic in a regulated or more security-conscious environment, such as co-tenanting across enterprise projects. The `actions-runner-dind-rootless` image runs rootless Docker inside the container as `runner` user. Note that this user does not have sudo access, so anything requiring admin privileges must be built into the runner's base image (like running `apt` to install additional software).
-
-#### Runner with K8s Jobs
-
-When using the default runner, jobs that use a container will run in docker. This necessitates privileged mode, either on the runner pod or the sidecar container.
-
-By setting the container mode, you can instead invoke these jobs using a [kubernetes implementation](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s) while not executing in privileged mode. 
- -The runner will dynamically spin up pods and k8s jobs in the runner's namespace to run the workflow, so a `workVolumeClaimTemplate` is required for the runner's working directory, and a service account with the [appropriate permissions.](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s#pre-requisites) - -There are some [limitations](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s#limitations) to this approach, mainly [job containers](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) are required on all workflows. - -```yaml -# runner.yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: Runner -metadata: - name: example-runner -spec: - repository: example/myrepo - containerMode: kubernetes - serviceAccountName: my-service-account - workVolumeClaimTemplate: - storageClassName: "my-dynamic-storage-class" - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi - env: [] -``` - -### Additional Tweaks - -You can pass details through the spec selector. Here's an eg. of what you may like to do: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: actions-runner - namespace: default -spec: - replicas: 2 - template: - metadata: - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: "true" - spec: - priorityClassName: "high" - nodeSelector: - node-role.kubernetes.io/test: "" - - securityContext: - #All level/role/type/user values will vary based on your SELinux policies. 
- #See https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html/container_security_guide/docker_selinux_security_policy for information about SELinux with containers - seLinuxOptions: - level: "s0" - role: "system_r" - type: "super_t" - user: "system_u" - - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/test - operator: Exists - - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: ScheduleAnyway - labelSelector: - matchLabels: - runner-deployment-name: actions-runner - - repository: mumoshu/actions-runner-controller-ci - # The default "summerwind/actions-runner" images are available at DockerHub: - # https://hub.docker.com/r/summerwind/actions-runner - # You can also build your own and specify it like the below: - image: custom-image/actions-runner:latest - imagePullPolicy: Always - resources: - limits: - cpu: "4.0" - memory: "8Gi" - requests: - cpu: "2.0" - memory: "4Gi" - # Timeout after a node crashed or became unreachable to evict your pods somewhere else (default 5mins) - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationSeconds: 10 - # true (default) = The runner restarts after running jobs, to ensure a clean and reproducible build environment - # false = The runner is persistent across jobs and doesn't automatically restart - # This directly controls the behaviour of `--once` flag provided to the github runner - ephemeral: false - # true (default) = A privileged docker sidecar container is included in the runner pod. - # false = A docker sidecar container is not included in the runner pod and you can't use docker. - # If set to false, there are no privileged container and you cannot use docker. - dockerEnabled: false - # Optional Docker containers network MTU - # If your network card MTU is smaller than Docker's default 1500, you might encounter Docker networking issues. 
- # To fix these issues, you should setup Docker MTU smaller than or equal to that on the outgoing network card. - # More information: - # - https://mlohr.com/docker-mtu/ - dockerMTU: 1500 - # Optional Docker registry mirror - # Docker Hub has an aggressive rate-limit configuration for free plans. - # To avoid disruptions in your CI/CD pipelines, you might want to setup an external or on-premises Docker registry mirror. - # More information: - # - https://docs.docker.com/docker-hub/download-rate-limit/ - # - https://cloud.google.com/container-registry/docs/pulling-cached-images - dockerRegistryMirror: https://mirror.gcr.io/ - # false (default) = Docker support is provided by a sidecar container deployed in the runner pod. - # true = No docker sidecar container is deployed in the runner pod but docker can be used within the runner container instead. The image summerwind/actions-runner-dind is used by default. - dockerdWithinRunnerContainer: true - #Optional environment variables for docker container - # Valid only when dockerdWithinRunnerContainer=false - dockerEnv: - - name: HTTP_PROXY - value: http://example.com - # Docker sidecar container image tweaks examples below, only applicable if dockerdWithinRunnerContainer = false - dockerdContainerResources: - limits: - cpu: "4.0" - memory: "8Gi" - requests: - cpu: "2.0" - memory: "4Gi" - # Additional N number of sidecar containers - sidecarContainers: - - name: mysql - image: mysql:5.7 - env: - - name: MYSQL_ROOT_PASSWORD - value: abcd1234 - securityContext: - runAsUser: 0 - # workDir if not specified (default = /runner/_work) - # You can customise this setting allowing you to change the default working directory location - # for example, the below setting is the same as on the ubuntu-18.04 image - workDir: /home/runner/work - # You can mount some of the shared volumes to the dind container using dockerVolumeMounts, like any other volume mounting. 
- # NOTE: in case you want to use an hostPath like the following example, make sure that Kubernetes doesn't schedule more than one runner - # per physical host. You can achieve that by setting pod anti-affinity rules and/or resource requests/limits. - volumes: - - name: docker-extra - hostPath: - path: /mnt/docker-extra - type: DirectoryOrCreate - - name: repo - hostPath: - path: /mnt/repo - type: DirectoryOrCreate - dockerVolumeMounts: - - mountPath: /var/lib/docker - name: docker-extra - # You can mount some of the shared volumes to the runner container using volumeMounts. - # NOTE: Do not try to mount the volume onto the runner workdir itself as it will not work. You could mount it however on a subdirectory in the runner workdir - # Please see https://github.com/actions/actions-runner-controller/issues/630#issuecomment-862087323 for more information. - volumeMounts: - - mountPath: /home/runner/work/repo - name: repo - # Optional storage medium type of runner volume mount. - # More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir - # "" (default) = Node's default medium - # Memory = RAM-backed filesystem (tmpfs) - # NOTE: Using RAM-backed filesystem gives you fastest possible storage on your host nodes. - volumeStorageMedium: "" - # Total amount of local storage resources required for runner volume mount. - # The default limit is undefined. - # NOTE: You can make sure that nodes' resources are never exceeded by limiting used storage size per runner pod. - # You can even disable the runner mount completely by setting limit to zero if dockerdWithinRunnerContainer = true. - # Please see https://github.com/actions/actions-runner-controller/pull/674 for more information. - volumeSizeLimit: 4Gi - # Optional name of the container runtime configuration that should be used for pods. - # This must match the name of a RuntimeClass resource available on the cluster. 
- # More info: https://kubernetes.io/docs/concepts/containers/runtime-class - runtimeClassName: "runc" - # This is an advanced configuration. Don't touch it unless you know what you're doing. - containers: - - name: runner - # Usually, the runner container's privileged field is derived from dockerdWithinRunnerContainer. - # But in the case where you need to run privileged job steps even if you don't use docker/don't need dockerd within the runner container, - # just specified `privileged: true` like this. - # See https://github.com/actions/actions-runner-controller/issues/1282 - # Do note that specifying `privileged: false` while using dind is very likely to fail, even if you use some vm-based container runtimes - # like firecracker and kata. Basically they run containers within dedicated micro vms and so - # it's more like you can use `privileged: true` safer with those runtimes. - # - # privileged: true -``` - -### Runner Graceful Termination - -As of ARC 0.27.0 (unreleased as of 2022/09/30), runners can only wait for 15 seconds by default on pod termination. - -This can be problematic in two scenarios: - -- Scenario 1 - RunnerSet-only: You're triggering updates other than replica changes to `RunnerSet` very often- With current implementation, every update except `replicas` change to RunnerSet may result in terminating the in-progress workflow jobs to fail. -- Scenario 2 - RunnerDeployment and RunnerSet: You have another Kubernetes controller that evicts runner pods directly, not consulting ARC. - -> RunnerDeployment is not affected by the Scenario 1 as RunnerDeployment-managed runners are already tolerable to unlimitedly long in-progress running job while being replaced, as it's graceful termination process is handled outside of the entrypoint and the Kubernetes' pod termination process. - -To make it more reliable, please set `spec.template.spec.terminationGracePeriodSeconds` field and the `RUNNER_GRACEFUL_STOP_TIMEOUT` environment variable appropriately. 
-
-If you want the pod to terminate in approximately 110 seconds at the latest since the termination request, try `terminationGracePeriodSeconds` of `110` and `RUNNER_GRACEFUL_STOP_TIMEOUT` of like `90`.
-
-The difference between `terminationGracePeriodSeconds` and `RUNNER_GRACEFUL_STOP_TIMEOUT` can vary depending on your environment and cluster.
-
-The idea is twofold:
-
-- `RUNNER_GRACEFUL_STOP_TIMEOUT` is for giving the runner the longest possible time to wait for the in-progress job to complete. You should keep this smaller than `terminationGracePeriodSeconds` so that you don't unnecessarily cancel running jobs.
-- `terminationGracePeriodSeconds` is for giving the runner the longest possible time to stop before disappearing. If the pod is forcefully terminated before a graceful stop, the job running within the runner pod can hang like 10 minutes in the GitHub Actions Workflow Run/Job UI. A correct value for this avoids the hang, even though it had to cancel the running job due to the approaching deadline.
-
-> We know the default 15 seconds timeout is too short to be useful at all.
-> In near future, we might raise the default to, for example, 100 seconds, so that runners that tend to run up to 100 seconds jobs can
-> terminate gracefully without failing running jobs. It will also allow the job which was running on the node that was requested for termination
-> to correctly report its status as "cancelled", rather than hanging approximately 10 minutes in the Actions Web UI until it finally fails (without any specific error message).
-> 100 seconds is just an example. It might be a good default in case you're using AWS EC2 Spot Instances because they tend to send
-> termination notice two minutes before the termination.
-> If you have any other suggestions for the default value, please share your thoughts in Discussions. 
-
-#### Status and Future of this feature
-
-Note that this feature is currently intended for use with runner pods being terminated by other Kubernetes controller and human operators, or those being replaced by ARC RunnerSet controller due to spec change(s) except `replicas`. RunnerDeployment has no issue for the scenario. Non-dind runners are affected but this feature does not support those yet.
-
-For example, a runner pod can be terminated prematurely by cluster-autoscaler when it's about to terminate the node on cluster scale down.
-All the variants of RunnerDeployment and RunnerSet managed runner pods, including runners with dockerd sidecars, rootless and rootful dind runners are affected by it. For dind runner pods only, you can use this feature to fix or alleviate the issue.
-
-To be clear, an increase/decrease in the desired replicas of RunnerDeployment and RunnerSet will never result in workflow jobs being terminated prematurely.
-That's because it's handled BEFORE the runner pod is terminated, by the respective ARC controller.
-
-For anyone interested in improving it, adding a dedicated pod finalizer for this issue will never work.
-It's because a pod finalizer can't prevent SIGTERM from being sent when deletionTimestamp is updated to non-zero,
-which triggers a Kubernetes pod termination process anyway.
-What we want here is to delay the SIGTERM sent to the `actions/runner` process running within the runner container of the runner pod,
-not blocking the removal of the pod resource in the Kubernetes cluster.
-
-Also, handling all the graceful termination scenarios with a single method may or may not work.
-
-The most viable option would be to do the graceful termination handling entirely in the SIGTERM handler within the runner entrypoint. 
-But this may or may not work long-term, as it's subject to terminationGracePeriodSeconds anyway and the author of this note thinks there still is -no formally defined limit for terminationGracePeriodSeconds and hence we arent' sure how long terminationGracePeriodSeconds can be set in practice. -Also, I think the max workflow job duration is approximately 24h. So Kubernetes must formally support setting terminationGracePeriodSeconds of 24h if -we are moving entirely to the entrypoint based solution. -If you have any insights about the matter, chime in to the development of ARC! - -That's why we still rely on ARC's own graceful termination logic in Runner controller for the spec change and replica increase/decrease of RunnerDeployment and -replica increase/decrease of RunnerSet, even though we now have the entrypoint based graceful stop handler. - -Our plan is to improve the RunnerSet to have the same logic as the Runner controller so that you don't need this feature based on the SIGTERM handler for the spec change of RunnerSet. - -### Custom Volume mounts - -You can configure your own custom volume mounts. For example to have the work/docker data in memory or on NVME SSD, for -i/o intensive builds. Other custom volume mounts should be possible as well, see [kubernetes documentation](https://kubernetes.io/docs/concepts/storage/volumes/) - -#### RAM Disk - -Example how to place the runner work dir, docker sidecar and /tmp within the runner onto a ramdisk. -```yaml -kind: RunnerDeployment -spec: - template: - spec: - dockerVolumeMounts: - - mountPath: /var/lib/docker - name: docker - volumeMounts: - - mountPath: /tmp - name: tmp - volumes: - - name: docker - emptyDir: - medium: Memory - - name: work # this volume gets automatically used up for the workdir - emptyDir: - medium: Memory - - name: tmp - emptyDir: - medium: Memory - ephemeral: true # recommended to not leak data between builds. 
-``` - -#### NVME SSD - -In this example we provide NVME backed storage for the workdir, docker sidecar and /tmp within the runner. -Here we use a working example on GKE, which will provide the NVME disk at /mnt/disks/ssd0. We will be placing the respective volumes in subdirs here and in order to be able to run multiple runners we will use the pod name as a prefix for subdirectories. Also the disk will fill up over time and disk space will not be freed until the node is removed. - -**Beware** that running these persistent backend volumes **leave data behind** between 2 different jobs on the workdir and `/tmp` with `ephemeral: false`. - -```yaml -kind: RunnerDeployment -spec: - template: - spec: - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - dockerVolumeMounts: - - mountPath: /var/lib/docker - name: docker - subPathExpr: $(POD_NAME)-docker - - mountPath: /runner/_work - name: work - subPathExpr: $(POD_NAME)-work - volumeMounts: - - mountPath: /runner/_work - name: work - subPathExpr: $(POD_NAME)-work - - mountPath: /tmp - name: tmp - subPathExpr: $(POD_NAME)-tmp - dockerEnv: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - volumes: - - hostPath: - path: /mnt/disks/ssd0 - name: docker - - hostPath: - path: /mnt/disks/ssd0 - name: work - - hostPath: - path: /mnt/disks/ssd0 - name: tmp - ephemeral: true # VERY important. otherwise data inside the workdir and /tmp is not cleared between builds -``` - -#### Docker image layers caching - -> **Note**: Ensure that the volume mount is added to the container that is running the Docker daemon. - -`docker` stores pulled and built image layers in the [daemon's (not client)](https://docs.docker.com/get-started/overview/#docker-architecture) [local storage area](https://docs.docker.com/storage/storagedriver/#sharing-promotes-smaller-images) which is usually at `/var/lib/docker`. 
- -By leveraging RunnerSet's dynamic PV provisioning feature and your CSI driver, you can let ARC maintain a pool of PVs that are -reused across runner pods to retain `/var/lib/docker`. - -_Be sure to add the volume mount to the container that is supposed to run the docker daemon._ - -_Be sure to trigger several workflow runs before checking if the cache is effective. ARC requires an `Available` PV to be reused for the new runner pod, and a PV becomes `Available` only after some time after the previous runner pod that was using the PV terminated. See [the related discussion](https://github.com/actions/actions-runner-controller/discussions/1605)._ - -By default, ARC creates a sidecar container named `docker` within the runner pod for running the docker daemon. In that case, -it's where you need the volume mount so that the manifest looks like: - -```yaml -kind: RunnerSet -metadata: - name: example -spec: - template: - spec: - containers: - - name: docker - volumeMounts: - - name: var-lib-docker - mountPath: /var/lib/docker - volumeClaimTemplates: - - metadata: - name: var-lib-docker - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Mi - storageClassName: var-lib-docker -``` - -With `dockerdWithinRunnerContainer: true`, you need to add the volume mount to the `runner` container. - -#### Go module and build caching - -`Go` is known to cache builds under `$HOME/.cache/go-build` and downloaded modules under `$HOME/pkg/mod`. -The module cache dir can be customized by setting `GOMOD_CACHE` so by setting it to somewhere under `$HOME/.cache`, -we can have a single PV to host both build and module cache, which might improve Go module downloading and building time. - -_Be sure to trigger several workflow runs before checking if the cache is effective. ARC requires an `Available` PV to be reused for the new runner pod, and a PV becomes `Available` only after some time after the previous runner pod that was using the PV terminated. 
See [the related discussion](https://github.com/actions/actions-runner-controller/discussions/1605)._ - -```yaml -kind: RunnerSet -metadata: - name: example -spec: - template: - spec: - containers: - - name: runner - env: - - name: GOMODCACHE - value: "/home/runner/.cache/go-mod" - volumeMounts: - - name: cache - mountPath: "/home/runner/.cache" - volumeClaimTemplates: - - metadata: - name: cache - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Mi - storageClassName: cache -``` - -#### PV-backed runner work directory - -ARC works by automatically creating runner pods for running [`actions/runner`](https://github.com/actions/runner) and [running `config.sh`](https://docs.github.com/en/actions/hosting-your-own-runners/adding-self-hosted-runners#adding-a-self-hosted-runner-to-a-repository) which you had to ran manually without ARC. - -`config.sh` is the script provided by `actions/runner` to pre-configure the runner process before being started. One of the options provided by `config.sh` is `--work`, -which specifies the working directory where the runner runs your workflow jobs in. - -The volume and the partition that hosts the work directory should have several or dozens of GBs free space that might be used by your workflow jobs. - -By default, ARC uses `/runner/_work` as work directory, which is powered by Kubernetes's `emptyDir`. [`emptyDir` is usually backed by a directory created within a host's volume](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), somewhere under `/var/lib/kuberntes/pods`. Therefore -your host's volume that is backing `/var/lib/kubernetes/pods` must have enough free space to serve all the concurrent runner pods that might be deployed onto your host at the same time. - -So, in case you see a job failure seemingly due to "disk full", it's very likely you need to reconfigure your host to have more free space. 
- -In case you can't rely on host's volume, consider using `RunnerSet` and backing the work directory with a ephemeral PV. - -Kubernetes 1.23 or greater provides the support for [generic ephemeral volumes](https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes), which is designed to support this exact use-case. It's defined in the Pod spec API so it isn't currently available for `RunnerDeployment`. `RunnerSet` is based on Kubernetes' `StatefulSet` which mostly embeds the Pod spec under `spec.template.spec`, so there you go. - -```yaml -kind: RunnerSet -metadata: - name: example -spec: - template: - spec: - containers: - - name: runner - volumeMounts: - - mountPath: /runner/_work - name: work - - name: docker - volumeMounts: - - mountPath: /runner/_work - name: work - volumes: - - name: work - ephemeral: - volumeClaimTemplate: - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: "runner-work-dir" - resources: - requests: - storage: 10Gi -``` - -### Runner Labels - -To run a workflow job on a self-hosted runner, you can use the following syntax in your workflow: - -```yaml -jobs: - release: - runs-on: self-hosted -``` - -When you have multiple kinds of self-hosted runners, you can distinguish between them using labels. In order to do so, you can specify one or more labels in your `Runner` or `RunnerDeployment` spec. - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: custom-runner -spec: - replicas: 1 - template: - spec: - repository: actions/actions-runner-controller - labels: - - custom-runner -``` - -Once this spec is applied, you can observe the labels for your runner from the repository or organization in the GitHub settings page for the repository or organization. 
You can now select a specific runner from your workflow by using the label in `runs-on`: - -```yaml -jobs: - release: - runs-on: custom-runner -``` - -When using labels there are a few things to be aware of: - -1. `self-hosted` is implict with every runner as this is an automatic label GitHub apply to any self-hosted runner. As a result ARC can treat all runners as having this label without having it explicitly defined in a runner's manifest. You do not need to explicitly define this label in your runner manifests (you can if you want though). -2. In addition to the `self-hosted` label, GitHub also applies a few other [default](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#using-default-labels-to-route-jobs) labels to any self-hosted runner. The other default labels relate to the architecture of the runner and so can't be implicitly applied by ARC as ARC doesn't know if the runner is `linux` or `windows`, `x64` or `ARM64` etc. If you wish to use these labels in your workflows and have ARC scale runners accurately you must also add them to your runner manifests. - -### Runner Groups - -Runner groups can be used to limit which repositories are able to use the GitHub Runner at an organization level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced. - -To add the runner to the group `NewGroup`, specify the group in your `Runner` or `RunnerDeployment` spec. - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: custom-runner -spec: - replicas: 1 - template: - spec: - group: NewGroup -``` - -GitHub supports custom visibility in a Runner Group to make it available to a specific set of repositories only. 
By default if no GitHub -authentication is included in the webhook server ARC will be assumed that all runner groups to be usable in all repositories. -Currently, GitHub does not include the repository runner group membership information in the workflow_job event (or any webhook). To make the ARC "runner group aware" additional GitHub API calls are needed to find out what runner groups are visible to the webhook's repository. This behaviour will impact your rate-limit budget and so the option needs to be explicitly configured by the end user. - -This option will be enabled when proper GitHub authentication options (token, app or basic auth) are provided in the webhook server and `useRunnerGroupsVisibility` is set to true, e.g. - -```yaml -githubWebhookServer: - enabled: false - replicaCount: 1 - useRunnerGroupsVisibility: true -``` - -### Runner Entrypoint Features - -> Environment variable values must all be strings - -The entrypoint script is aware of a few environment variables for configuring features: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runnerdeployment -spec: - template: - spec: - env: - # Disable various runner entrypoint log levels - - name: LOG_DEBUG_DISABLED - value: "true" - - name: LOG_NOTICE_DISABLED - value: "true" - - name: LOG_WARNING_DISABLED - value: "true" - - name: LOG_ERROR_DISABLED - value: "true" - - name: LOG_SUCCESS_DISABLED - value: "true" - # Issues a sleep command at the start of the entrypoint - - name: STARTUP_DELAY_IN_SECONDS - value: "2" - # Specify the duration to wait for the docker daemon to be available - # The default duration of 120 seconds is sometimes too short - # to reliably wait for the docker daemon to start - # See https://github.com/actions/actions-runner-controller/issues/1804 - - name: WAIT_FOR_DOCKER_SECONDS - value: 120 - # Disables the wait for the docker daemon to be available check - - name: DISABLE_WAIT_FOR_DOCKER - value: "true" - # Disables 
automatic runner updates - # WARNING : Upon a new version of the actions/runner software being released - # GitHub stops allocating jobs to runners on the previous version of the - # actions/runner software after 30 days. - - name: DISABLE_RUNNER_UPDATE - value: "true" -``` - -There are a few advanced envvars also that are available only for dind runners: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runnerdeployment -spec: - template: - spec: - dockerdWithinRunnerContainer: true - image: summerwind/actions-runner-dind - env: - # Sets the respective default-address-pools fields within dockerd daemon.json - # See https://github.com/actions/actions-runner-controller/pull/1971 for more information. - # Also see https://github.com/docker/docs/issues/8663 for the default base/size values in dockerd. - - name: DOCKER_DEFAULT_ADDRESS_POOL_BASE - value: "172.17.0.0/12" - - name: DOCKER_DEFAULT_ADDRESS_POOL_SIZE - value: "24" -``` - -### Using IRSA (IAM Roles for Service Accounts) in EKS - -> This feature requires controller version => [v0.15.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.15.0) - -Similar to regular pods and deployments, you firstly need an existing service account with the IAM role associated. -Create one using e.g. `eksctl`. You can refer to [the EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) for more details. - -Once you set up the service account, all you need is to add `serviceAccountName` and `fsGroup` to any pods that use the IAM-role enabled service account. - -`fsGroup` needs to be set to the UID of the `runner` Linux user that runs the runner agent (and dockerd in case you use dind-runner). For anyone using an Ubuntu 20.04 runner image it's `1000` and for Ubuntu 22.04 one it's `1001`. 
- -For `RunnerDeployment`, you can set those two fields under the runner spec at `RunnerDeployment.Spec.Template`: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: example-runnerdeploy -spec: - template: - spec: - repository: USER/REO - serviceAccountName: my-service-account - securityContext: - # For Ubuntu 20.04 runner - fsGroup: 1000 - # Use 1001 for Ubuntu 22.04 runner - #fsGroup: 1001 -``` -### Software Installed in the Runner Image - -**Cloud Tooling**
-The project supports being deployed on the various cloud Kubernetes platforms (e.g. EKS), it does not however aim to go beyond that. No cloud specific tooling is bundled in the base runner, this is an active decision to keep the overhead of maintaining the solution manageable. - -**Bundled Software**
-The GitHub hosted runners include a large amount of pre-installed software packages. GitHub maintains a list in README files at . - -This solution maintains a few Ubuntu based runner images, these images do not contain all of the software installed on the GitHub runners. The images contain the following subset of packages from the GitHub runners: - -- Some basic CLI packages -- Git -- Git LFS -- Docker -- Docker Compose - -The virtual environments from GitHub contain a lot more software packages (different versions of Java, Node.js, Golang, .NET, etc) which are not provided in the runner image. Most of these have dedicated setup actions which allow the tools to be installed on-demand in a workflow, for example: `actions/setup-java` or `actions/setup-node` - -If there is a need to include packages in the runner image for which there is no setup action, then this can be achieved by building a custom container image for the runner. The easiest way is to start with the `summerwind/actions-runner` image and then install the extra dependencies directly in the docker image: - -```shell -FROM summerwind/actions-runner:latest - -RUN sudo apt-get update -y \ - && sudo apt-get install $YOUR_PACKAGES - && sudo rm -rf /var/lib/apt/lists/* -``` - -You can then configure the runner to use a custom docker image by configuring the `image` field of a `RunnerDeployment` or `RunnerSet`: - -```yaml -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: custom-runner -spec: - repository: actions/actions-runner-controller - image: YOUR_CUSTOM_RUNNER_IMAGE -``` - -### Using without cert-manager - -There are two methods of deploying without cert-manager, you can generate your own certificates or rely on helm to generate a CA and certificate each time you update the chart. 
- -#### Using custom certificates - -Assuming you are installing in the default namespace, ensure your certificate has SANs: - -* `actions-runner-controller-webhook.actions-runner-system.svc` -* `actions-runner-controller-webhook.actions-runner-system.svc.cluster.local` - -It is possible to use a self-signed certificate by following a guide like -[this one](https://mariadb.com/docs/security/encryption/in-transit/create-self-signed-certificates-keys-openssl/) -using `openssl`. - -Install your certificate as a TLS secret: - -```shell -$ kubectl create secret tls actions-runner-controller-serving-cert \ - -n actions-runner-system \ - --cert=path/to/cert/file \ - --key=path/to/key/file -``` - -Set the Helm chart values as follows: - -```shell -$ CA_BUNDLE=$(cat path/to/ca.pem | base64) -$ helm upgrade --install actions/actions-runner-controller \ - certManagerEnabled=false \ - admissionWebHooks.caBundle=${CA_BUNDLE} -``` - -#### Using helm to generate CA and certificates - -Set the Helm chart values as follows: - -```shell -$ helm upgrade --install actions/actions-runner-controller \ - certManagerEnabled=false -``` - -This generates a temporary CA using the helm `genCA` function and issues a certificate for the webhook. Note that this approach rotates the CA and certificate each time `helm install` or `helm upgrade` are run. In effect, this will cause short interruptions to the mutating webhook while the ARC pods stabilize and use the new certificate each time `helm upgrade` is called for the chart. The outage can affect kube-api activity due to the way mutating webhooks are called. 
- -### Setting up Windows Runners - -The main two steps in enabling Windows self-hosted runners are: - -- Using `nodeSelector`'s property to filter the `cert-manger` and `actions-runner-controller` pods -- Deploying a RunnerDeployment using a Windows-based image - -For the first step, you need to set the `nodeSelector.kubernetes.io/os` property in both the `cert-manager` and the `actions-runner-controller` deployments to `linux` so that the pods for these two deployments are only scheduled in Linux nodes. You can do this as follows: - -```yaml -nodeSelector: - kubernetes.io/os: linux -``` - -`cert-manager` has 4 different application within it the main application, the `webhook`, the `cainjector` and the `startupapicheck`. In the parameters or values file you use for the deployment you need to add the `nodeSelector` property four times, one for each application. - -For the `actions-runner-controller` you only have to use the `nodeSelector` only for the main deployment, so it only has to be set once. - -Once this is set up, you will need to deploy two different `RunnerDeployment`'s, one for Windows and one for Linux. -The Linux deployment can use either the default image or a custom one, however, there isn't a default Windows image so for Windows deployments you will have to build your own image. - -Below we share an example of the YAML used to create the deployment for each Operating System and a Dockerfile for the Windows deployment. - -
Windows -

- -#### RunnerDeployment - -```yaml ---- -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: k8s-runners-windows - namespace: actions-runner-system -spec: - template: - spec: - image: /: - dockerdWithinRunnerContainer: true - nodeSelector: - kubernetes.io/os: windows - kubernetes.io/arch: amd64 - repository: / - labels: - - windows - - X64 -``` - -#### Dockerfile - -> Note that you'd need to patch the below Dockerfile if you need a graceful termination. -> See https://github.com/actions/actions-runner-controller/pull/1608/files#r917319574 for more information. - -```Dockerfile -FROM mcr.microsoft.com/windows/servercore:ltsc2019 - -WORKDIR /actions-runner - -SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop';$ProgressPreference='silentlyContinue';"] - -RUN Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v2.292.0/actions-runner-win-x64-2.292.0.zip -OutFile actions-runner-win-x64-2.292.0.zip - -RUN if((Get-FileHash -Path actions-runner-win-x64-2.292.0.zip -Algorithm SHA256).Hash.ToUpper() -ne 'f27dae1413263e43f7416d719e0baf338c8d80a366fed849ecf5fffcec1e941f'.ToUpper()){ throw 'Computed checksum did not match' } - -RUN Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('actions-runner-win-x64-2.292.0.zip', $PWD) - -RUN Invoke-WebRequest -Uri 'https://aka.ms/install-powershell.ps1' -OutFile install-powershell.ps1; ./install-powershell.ps1 -AddToPath - -RUN powershell Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) - -RUN powershell choco install git.install --params "'/GitAndUnixToolsOnPath'" -y - -RUN powershell choco feature enable -n allowGlobalConfirmation - -CMD [ "pwsh", "-c", "./config.cmd --name $env:RUNNER_NAME 
--url https://github.com/$env:RUNNER_REPO --token $env:RUNNER_TOKEN --labels $env:RUNNER_LABELS --unattended --replace --ephemeral; ./run.cmd"] -``` -

-
- - -
Linux -

- -#### RunnerDeployment - -```yaml ---- -apiVersion: actions.summerwind.dev/v1alpha1 -kind: RunnerDeployment -metadata: - name: k8s-runners-linux - namespace: actions-runner-system -spec: - template: - spec: - image: /: - nodeSelector: - kubernetes.io/os: linux - kubernetes.io/arch: amd64 - repository: : - labels: - - linux - - X64 -``` -

-
- -After both `RunnerDeployment`'s are up and running, you can now proceed to deploy the `HorizontalRunnerAutoscaler` for each deployment. - -### Multitenancy - -> This feature requires controller version => [v0.26.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.26.0) - -In a large enterprise, there might be many GitHub organizations that requires self-hosted runners. Previously, the only way to provide ARC-managed self-hosted runners in such environment was [Deploying Multiple Controllers](#deploying-multiple-controllers), which incurs overhead due to it requires one ARC installation per GitHub organization. - -With multitenancy, you can let ARC manage self-hosted runners across organizations. It's enabled by default and the only thing you need to start using it is to set the `spec.githubAPICredentialsFrom.secretRef.name` fields for the following resources: - -- `HorizontalRunnerAutoscaler` -- `RunnerSet` - -Or `spec.template.spec.githubAPICredentialsFrom.secretRef.name` field for the following resource: - -- `RunnerDeployment` - -> Although not explained above, `spec.githubAPICredentialsFrom` fields do exist in `Runner` and `RunnerReplicaSet`. A comparable pod annotation exists for the runner pod, too. -> However, note that `Runner`, `RunnerReplicaSet` and runner pods are implementation details and are managed by `RunnerDeployment` and ARC. -> Usually you don't need to manually set the fields for those resources. - -`githubAPICredentialsFrom.secretRef.name` should refer to the name of the Kubernetes secret that contains either PAT or GitHub App credentials that is used for GitHub API calls for the said resource. - -Usually, you should have a set of GitHub App credentials per a GitHub organization and you would have a RunnerDeployment and a HorizontalRunnerAutoscaler per an organization runner group. 
So, you might end up having the following resources for each organization: - -- 1 Kubernetes secret that contains GitHub App credentials -- 1 RunnerDeployment/RunnerSet and 1 HorizontalRunnerAutoscaler per Runner Group - -And the RunnerDeployment/RunnerSet and HorizontalRunnerAutoscaler should have the same value for `spec.githubAPICredentialsFrom.secretRef.name`, which refers to the name of the Kubernetes secret. - -```yaml -kind: Secret -data: - github_app_id: ... - github_app_installation_id: ... - github_app_private_key: ... ---- -kind: RunnerDeployment -metadata: - namespace: org1-runners -spec: - template: - spec: - githubAPICredentialsFrom: - secretRef: - name: org1-github-app ---- -kind: HorizontalRunnerAutoscaler -metadata: - namespace: org1-runners -spec: - githubAPICredentialsFrom: - secretRef: - name: org1-github-app -``` - -> Do note that, as shown in the above example, you usually set the same secret name to `githubAPICredentialsFrom.secretRef.name` fields of both `RunnerDeployment` and `HorizontalRunnerAutoscaler`, so that GitHub API calls for the same set of runners shares the specified credentials, regardless of -when and which varying ARC component(`horizontalrunnerautoscaler-controller`, `runnerdeployment-controller`, `runnerreplicaset-controller`, `runner-controller` or `runnerpod-controller`) makes specific API calls. -> Just don't be surprised you have to repeat `githubAPICredentialsFrom.secretRef.name` settings among two resources! - -Please refer to [Deploying Using GitHub App Authentication](#deploying-using-github-app-authentication) for how you could create the Kubernetes secret containing GitHub App credentials. - -### Metrics - -The controller also exposes Prometheus metrics on a `/metrics` endpoint. By default this is on port `8443` behind an RBAC proxy. 
- -If needed, the proxy can be disabled in the `values.yml` file: - -```diff -metrics: - serviceAnnotations: {} - serviceMonitor: false - serviceMonitorLabels: {} -+ port: 8080 - proxy: -+ enabled: false -``` - -If Prometheus is available inside the cluster, then add some `podAnnotations` to begin scraping the metrics: - -```diff -podAnnotations: -+ prometheus.io/scrape: "true" -+ prometheus.io/path: /metrics -+ prometheus.io/port: "8080" -``` - -# Troubleshooting - -See [troubleshooting guide](../TROUBLESHOOTING.md) for solutions to various problems people have run into consistently. - -# Contributing - -For more details on contributing to the project (including requirements) please check out [Getting Started with Contributing](../CONTRIBUTING.md). diff --git a/docs/installing-arc.md b/docs/installing-arc.md new file mode 100644 index 0000000000..48ba95eeda --- /dev/null +++ b/docs/installing-arc.md @@ -0,0 +1,26 @@ +# Installing ARC + +## Installation + +By default, actions-runner-controller uses [cert-manager](https://cert-manager.io/docs/installation/kubernetes/) for certificate management of Admission Webhook. Make sure you have already installed cert-manager before you install. The installation instructions for the cert-manager can be found below. + +- [Installing cert-manager on Kubernetes](https://cert-manager.io/docs/installation/kubernetes/) + +After installing cert-manager, install the custom resource definitions and actions-runner-controller with `kubectl` or `helm`. This will create an actions-runner-system namespace in your Kubernetes and deploy the required resources. 
+
+**Kubectl Deployment:**
+
+```shell
+# REPLACE "v0.25.2" with the version you wish to deploy
+kubectl create -f https://github.com/actions/actions-runner-controller/releases/download/v0.25.2/actions-runner-controller.yaml
+```
+
+**Helm Deployment:**
+
+Configure your values.yaml; see the chart's [README](../charts/actions-runner-controller/README.md) for the values documentation
+
+```shell
+helm repo add actions-runner-controller https://actions-runner-controller.github.io/actions-runner-controller
+helm upgrade --install --namespace actions-runner-system --create-namespace \
+  --wait actions-runner-controller actions-runner-controller/actions-runner-controller
+```
diff --git a/docs/managing-access-with-runner-groups.md b/docs/managing-access-with-runner-groups.md
new file mode 100644
index 0000000000..551dfc3999
--- /dev/null
+++ b/docs/managing-access-with-runner-groups.md
@@ -0,0 +1,32 @@
+# Managing access with runner groups
+
+## Runner Groups
+
+Runner groups can be used to limit which repositories are able to use the GitHub Runner at an organization level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced.
+
+To add the runner to the group `NewGroup`, specify the group in your `Runner` or `RunnerDeployment` spec.
+
+```yaml
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: RunnerDeployment
+metadata:
+  name: custom-runner
+spec:
+  replicas: 1
+  template:
+    spec:
+      group: NewGroup
+```
+
+GitHub supports custom visibility in a Runner Group to make it available to a specific set of repositories only. By default, if no GitHub
+authentication is included in the webhook server, ARC will assume that all runner groups are usable in all repositories.
+Currently, GitHub does not include the repository runner group membership information in the workflow_job event (or any webhook). 
To make the ARC "runner group aware" additional GitHub API calls are needed to find out what runner groups are visible to the webhook's repository. This behaviour will impact your rate-limit budget and so the option needs to be explicitly configured by the end user. + +This option will be enabled when proper GitHub authentication options (token, app or basic auth) are provided in the webhook server and `useRunnerGroupsVisibility` is set to true, e.g. + +```yaml +githubWebhookServer: + enabled: false + replicaCount: 1 + useRunnerGroupsVisibility: true +``` \ No newline at end of file diff --git a/docs/monitoring-and-troubleshooting.md b/docs/monitoring-and-troubleshooting.md new file mode 100644 index 0000000000..7c4e33f9c6 --- /dev/null +++ b/docs/monitoring-and-troubleshooting.md @@ -0,0 +1,30 @@ +# Monitoring and troubleshooting + +## Metrics + +The controller also exposes Prometheus metrics on a `/metrics` endpoint. By default this is on port `8443` behind an RBAC proxy. + +If needed, the proxy can be disabled in the `values.yml` file: + +```diff +metrics: + serviceAnnotations: {} + serviceMonitor: false + serviceMonitorLabels: {} ++ port: 8080 + proxy: ++ enabled: false +``` + +If Prometheus is available inside the cluster, then add some `podAnnotations` to begin scraping the metrics: + +```diff +podAnnotations: ++ prometheus.io/scrape: "true" ++ prometheus.io/path: /metrics ++ prometheus.io/port: "8080" +``` + +## Troubleshooting + +See [troubleshooting guide](../TROUBLESHOOTING.md) for solutions to various problems people have run into consistently. \ No newline at end of file diff --git a/docs/quickstart.md b/docs/quickstart.md new file mode 100644 index 0000000000..04eb567e5f --- /dev/null +++ b/docs/quickstart.md @@ -0,0 +1,151 @@ +# Actions Runner Controller Quickstart + +GitHub Actions automates the deployment of code to different environments, including production. The environments contain the `GitHub Runner` software which executes the automation. 
`GitHub Runner` can be run in GitHub-hosted cloud or self-hosted environments. Self-hosted environments offer more control of hardware, operating system, and software tools. They can be run on physical machines, virtual machines, or in a container. Containerized environments are lightweight, loosely coupled, highly efficient and can be managed centrally. However, they are not straightforward to use.
+
+`Actions Runner Controller (ARC)` makes it simpler to run self-hosted environments on a Kubernetes (K8s) cluster.
+
+With ARC you can:
+
+- **Deploy self-hosted runners on a Kubernetes cluster** with a simple set of commands.
+- **Auto scale runners** based on demand.
+- **Set up across GitHub editions** including GitHub Enterprise editions and GitHub Enterprise Cloud.
+
+## Overview
+
+For an overview of ARC, please refer to "[ARC Overview](https://github.com/actions/actions-runner-controller/blob/master/docs/Actions-Runner-Controller-Overview.md)."
+
+## Getting Started
+
+ARC can be set up with just a few steps.
+
+In this section we will set up prerequisites, deploy ARC into a K8s cluster, and then run GitHub Action workflows on that cluster.
+
+### Prerequisites
+
+
Create a K8s cluster, if not available. + +If you don't have a K8s cluster, you can install a local environment using minikube. For more information, see "Installing minikube." + +
+ +:one: Install cert-manager in your cluster. For more information, see "[cert-manager](https://cert-manager.io/docs/installation/)." + +```shell +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.8.2/cert-manager.yaml +``` + + *note:- This command uses v1.8.2. Please replace with a later version, if available. + +>You may also install cert-manager using Helm. For instructions, see "[Installing with Helm](https://cert-manager.io/docs/installation/helm/#installing-with-helm)." + +:two: Next, Generate a Personal Access Token (PAT) for ARC to authenticate with GitHub. + +- Login to your GitHub account and Navigate to "[Create new Token](https://github.com/settings/tokens/new)." +- Select **repo**. +- Click **Generate Token** and then copy the token locally ( we’ll need it later). + +### Deploy and Configure ARC + +1️⃣ Deploy and configure ARC on your K8s cluster. You may use Helm or Kubectl. + +
Helm deployment + +##### Add repository + +```shell +helm repo add actions-runner-controller https://actions-runner-controller.github.io/actions-runner-controller +``` + +##### Install Helm chart + +```shell +helm upgrade --install --namespace actions-runner-system --create-namespace\ + --set=authSecret.create=true\ + --set=authSecret.github_token="REPLACE_YOUR_TOKEN_HERE"\ + --wait actions-runner-controller actions/actions-runner-controller +``` + + *note:- Replace REPLACE_YOUR_TOKEN_HERE with your PAT that was generated previously. +
+ +
Kubectl deployment + +##### Deploy ARC + +```shell +kubectl apply -f \ +https://github.com/actions/actions-runner-controller/\ +releases/download/v0.22.0/actions-runner-controller.yaml +``` + + *note:- Replace "v0.22.0" with the version you wish to deploy + +##### Configure Personal Access Token + +```shell +kubectl create secret generic controller-manager \ + -n actions-runner-system \ + --from-literal=github_token=REPLACE_YOUR_TOKEN_HERE +```` + + *note:- Replace REPLACE_YOUR_TOKEN_HERE with your PAT that was generated previously. + +
+ +2️⃣ Create the GitHub self hosted runners and configure to run against your repository. + +Create a `runnerdeployment.yaml` file and copy the following YAML contents into it: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runnerdeploy +spec: + replicas: 1 + template: + spec: + repository: mumoshu/actions-runner-controller-ci +```` + *note:- Replace "mumoshu/actions-runner-controller-ci" with your repository name. + +Apply this file to your K8s cluster. +```shell +kubectl apply -f runnerdeployment.yaml +```` + +*🎉 We are done - now we should have self hosted runners running in K8s configured to your repository. 🎉* + +Next - lets verify our setup and execute some workflows. + +### Verify and Execute Workflows + +:one: Verify that your setup is successful: +```shell + +$ kubectl get runners +NAME REPOSITORY STATUS +example-runnerdeploy2475h595fr mumoshu/actions-runner-controller-ci Running + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +example-runnerdeploy2475ht2qbr 2/2 Running 0 1m +```` + +Also, this runner has been registered directly to the specified repository, you can see it in repository settings. For more information, see "[Checking the status of a self-hosted runner - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/monitoring-and-troubleshooting-self-hosted-runners#checking-the-status-of-a-self-hosted-runner)." + +:two: You are ready to execute workflows against this self-hosted runner. For more information, see "[Using self-hosted runners in a workflow - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#using-self-hosted-runners-in-a-workflow)." + +There is also a quick start guide to get started on Actions, For more information, please refer to "[Quick start Guide to GitHub Actions](https://docs.github.com/en/actions/quickstart)." 
+ +## Learn more + +For more detailed documentation, please refer to "[Detailed Documentation](https://github.com/actions/actions-runner-controller/blob/master/docs/detailed-docs.md)." + +## Contributing + +We welcome contributions from the community. For more details on contributing to the project (including requirements), please refer to "[Getting Started with Contributing](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md)." + +## Troubleshooting + +We are very happy to help you with any issues you have. Please refer to the "[Troubleshooting](https://github.com/actions/actions-runner-controller/blob/master/TROUBLESHOOTING.md)" section for common issues. diff --git a/docs/using-arc-across-organizations.md b/docs/using-arc-across-organizations.md new file mode 100644 index 0000000000..8b02433123 --- /dev/null +++ b/docs/using-arc-across-organizations.md @@ -0,0 +1,61 @@ +# Using ARC across organizations + +## Multitenancy + +> This feature requires controller version => [v0.26.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.26.0) + +In a large enterprise, there might be many GitHub organizations that requires self-hosted runners. Previously, the only way to provide ARC-managed self-hosted runners in such environment was [Deploying Multiple Controllers](#deploying-multiple-controllers), which incurs overhead due to it requires one ARC installation per GitHub organization. + +With multitenancy, you can let ARC manage self-hosted runners across organizations. It's enabled by default and the only thing you need to start using it is to set the `spec.githubAPICredentialsFrom.secretRef.name` fields for the following resources: + +- `HorizontalRunnerAutoscaler` +- `RunnerSet` + +Or `spec.template.spec.githubAPICredentialsFrom.secretRef.name` field for the following resource: + +- `RunnerDeployment` + +> Although not explained above, `spec.githubAPICredentialsFrom` fields do exist in `Runner` and `RunnerReplicaSet`. 
A comparable pod annotation exists for the runner pod, too.
+> However, note that `Runner`, `RunnerReplicaSet` and runner pods are implementation details and are managed by `RunnerDeployment` and ARC.
+> Usually you don't need to manually set the fields for those resources.
+
+`githubAPICredentialsFrom.secretRef.name` should refer to the name of the Kubernetes secret that contains either PAT or GitHub App credentials that are used for GitHub API calls for the said resource.
+
+Usually, you should have a set of GitHub App credentials per GitHub organization and you would have a RunnerDeployment and a HorizontalRunnerAutoscaler per organization runner group. So, you might end up having the following resources for each organization:
+
+- 1 Kubernetes secret that contains GitHub App credentials
+- 1 RunnerDeployment/RunnerSet and 1 HorizontalRunnerAutoscaler per Runner Group
+
+And the RunnerDeployment/RunnerSet and HorizontalRunnerAutoscaler should have the same value for `spec.githubAPICredentialsFrom.secretRef.name`, which refers to the name of the Kubernetes secret.
+
+```yaml
+kind: Secret
+data:
+  github_app_id: ...
+  github_app_installation_id: ...
+  github_app_private_key: ...
+--- +kind: RunnerDeployment +metadata: + namespace: org1-runners +spec: + template: + spec: + githubAPICredentialsFrom: + secretRef: + name: org1-github-app +--- +kind: HorizontalRunnerAutoscaler +metadata: + namespace: org1-runners +spec: + githubAPICredentialsFrom: + secretRef: + name: org1-github-app +``` + +> Do note that, as shown in the above example, you usually set the same secret name to `githubAPICredentialsFrom.secretRef.name` fields of both `RunnerDeployment` and `HorizontalRunnerAutoscaler`, so that GitHub API calls for the same set of runners shares the specified credentials, regardless of +when and which varying ARC component(`horizontalrunnerautoscaler-controller`, `runnerdeployment-controller`, `runnerreplicaset-controller`, `runner-controller` or `runnerpod-controller`) makes specific API calls. +> Just don't be surprised you have to repeat `githubAPICredentialsFrom.secretRef.name` settings among two resources! + +Please refer to [Deploying Using GitHub App Authentication](#deploying-using-github-app-authentication) for how you could create the Kubernetes secret containing GitHub App credentials. \ No newline at end of file diff --git a/docs/using-arc-runners-in-a-workflow.md b/docs/using-arc-runners-in-a-workflow.md new file mode 100644 index 0000000000..6d1fb459c9 --- /dev/null +++ b/docs/using-arc-runners-in-a-workflow.md @@ -0,0 +1,40 @@ +# Using ARC runners in a workflow + +## Runner Labels + +To run a workflow job on a self-hosted runner, you can use the following syntax in your workflow: + +```yaml +jobs: + release: + runs-on: self-hosted +``` + +When you have multiple kinds of self-hosted runners, you can distinguish between them using labels. In order to do so, you can specify one or more labels in your `Runner` or `RunnerDeployment` spec. 
+
+```yaml
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: RunnerDeployment
+metadata:
+  name: custom-runner
+spec:
+  replicas: 1
+  template:
+    spec:
+      repository: actions/actions-runner-controller
+      labels:
+        - custom-runner
+```
+
+Once this spec is applied, you can observe the labels for your runner in the GitHub settings page for the repository or organization. You can now select a specific runner from your workflow by using the label in `runs-on`:
+
+```yaml
+jobs:
+  release:
+    runs-on: custom-runner
+```
+
+When using labels there are a few things to be aware of:
+
+1. `self-hosted` is implicit with every runner as this is an automatic label GitHub applies to any self-hosted runner. As a result ARC can treat all runners as having this label without having it explicitly defined in a runner's manifest. You do not need to explicitly define this label in your runner manifests (you can if you want though).
+2. In addition to the `self-hosted` label, GitHub also applies a few other [default](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#using-default-labels-to-route-jobs) labels to any self-hosted runner. The other default labels relate to the architecture of the runner and so can't be implicitly applied by ARC as ARC doesn't know if the runner is `linux` or `windows`, `x64` or `ARM64` etc. If you wish to use these labels in your workflows and have ARC scale runners accurately you must also add them to your runner manifests.
\ No newline at end of file
diff --git a/docs/using-custom-volumes.md b/docs/using-custom-volumes.md
new file mode 100644
index 0000000000..bfdf8d77b6
--- /dev/null
+++ b/docs/using-custom-volumes.md
@@ -0,0 +1,205 @@
+# Using custom volumes
+
+## Custom Volume mounts
+
+You can configure your own custom volume mounts. For example to have the work/docker data in memory or on NVME SSD, for
+i/o intensive builds.
Other custom volume mounts should be possible as well, see [kubernetes documentation](https://kubernetes.io/docs/concepts/storage/volumes/) + +### RAM Disk + +Example how to place the runner work dir, docker sidecar and /tmp within the runner onto a ramdisk. +```yaml +kind: RunnerDeployment +spec: + template: + spec: + dockerVolumeMounts: + - mountPath: /var/lib/docker + name: docker + volumeMounts: + - mountPath: /tmp + name: tmp + volumes: + - name: docker + emptyDir: + medium: Memory + - name: work # this volume gets automatically used up for the workdir + emptyDir: + medium: Memory + - name: tmp + emptyDir: + medium: Memory + ephemeral: true # recommended to not leak data between builds. +``` + +### NVME SSD + +In this example we provide NVME backed storage for the workdir, docker sidecar and /tmp within the runner. +Here we use a working example on GKE, which will provide the NVME disk at /mnt/disks/ssd0. We will be placing the respective volumes in subdirs here and in order to be able to run multiple runners we will use the pod name as a prefix for subdirectories. Also the disk will fill up over time and disk space will not be freed until the node is removed. + +**Beware** that running these persistent backend volumes **leave data behind** between 2 different jobs on the workdir and `/tmp` with `ephemeral: false`. 
+ +```yaml +kind: RunnerDeployment +spec: + template: + spec: + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + dockerVolumeMounts: + - mountPath: /var/lib/docker + name: docker + subPathExpr: $(POD_NAME)-docker + - mountPath: /runner/_work + name: work + subPathExpr: $(POD_NAME)-work + volumeMounts: + - mountPath: /runner/_work + name: work + subPathExpr: $(POD_NAME)-work + - mountPath: /tmp + name: tmp + subPathExpr: $(POD_NAME)-tmp + dockerEnv: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumes: + - hostPath: + path: /mnt/disks/ssd0 + name: docker + - hostPath: + path: /mnt/disks/ssd0 + name: work + - hostPath: + path: /mnt/disks/ssd0 + name: tmp + ephemeral: true # VERY important. otherwise data inside the workdir and /tmp is not cleared between builds +``` + +### Docker image layers caching + +> **Note**: Ensure that the volume mount is added to the container that is running the Docker daemon. + +`docker` stores pulled and built image layers in the [daemon's (not client)](https://docs.docker.com/get-started/overview/#docker-architecture) [local storage area](https://docs.docker.com/storage/storagedriver/#sharing-promotes-smaller-images) which is usually at `/var/lib/docker`. + +By leveraging RunnerSet's dynamic PV provisioning feature and your CSI driver, you can let ARC maintain a pool of PVs that are +reused across runner pods to retain `/var/lib/docker`. + +_Be sure to add the volume mount to the container that is supposed to run the docker daemon._ + +_Be sure to trigger several workflow runs before checking if the cache is effective. ARC requires an `Available` PV to be reused for the new runner pod, and a PV becomes `Available` only after some time after the previous runner pod that was using the PV terminated. 
See [the related discussion](https://github.com/actions/actions-runner-controller/discussions/1605)._ + +By default, ARC creates a sidecar container named `docker` within the runner pod for running the docker daemon. In that case, +it's where you need the volume mount so that the manifest looks like: + +```yaml +kind: RunnerSet +metadata: + name: example +spec: + template: + spec: + containers: + - name: docker + volumeMounts: + - name: var-lib-docker + mountPath: /var/lib/docker + volumeClaimTemplates: + - metadata: + name: var-lib-docker + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Mi + storageClassName: var-lib-docker +``` + +With `dockerdWithinRunnerContainer: true`, you need to add the volume mount to the `runner` container. + +### Go module and build caching + +`Go` is known to cache builds under `$HOME/.cache/go-build` and downloaded modules under `$HOME/pkg/mod`. +The module cache dir can be customized by setting `GOMOD_CACHE` so by setting it to somewhere under `$HOME/.cache`, +we can have a single PV to host both build and module cache, which might improve Go module downloading and building time. + +_Be sure to trigger several workflow runs before checking if the cache is effective. ARC requires an `Available` PV to be reused for the new runner pod, and a PV becomes `Available` only after some time after the previous runner pod that was using the PV terminated. 
See [the related discussion](https://github.com/actions/actions-runner-controller/discussions/1605)._
+
+```yaml
+kind: RunnerSet
+metadata:
+  name: example
+spec:
+  template:
+    spec:
+      containers:
+      - name: runner
+        env:
+        - name: GOMODCACHE
+          value: "/home/runner/.cache/go-mod"
+        volumeMounts:
+        - name: cache
+          mountPath: "/home/runner/.cache"
+  volumeClaimTemplates:
+  - metadata:
+      name: cache
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 10Mi
+      storageClassName: cache
+```
+
+### PV-backed runner work directory
+
+ARC works by automatically creating runner pods for running [`actions/runner`](https://github.com/actions/runner) and [running `config.sh`](https://docs.github.com/en/actions/hosting-your-own-runners/adding-self-hosted-runners#adding-a-self-hosted-runner-to-a-repository) which you had to run manually without ARC.
+
+`config.sh` is the script provided by `actions/runner` to pre-configure the runner process before being started. One of the options provided by `config.sh` is `--work`,
+which specifies the working directory where the runner runs your workflow jobs in.
+
+The volume and the partition that hosts the work directory should have several or dozens of GBs free space that might be used by your workflow jobs.
+
+By default, ARC uses `/runner/_work` as work directory, which is powered by Kubernetes's `emptyDir`. [`emptyDir` is usually backed by a directory created within a host's volume](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), somewhere under `/var/lib/kubernetes/pods`. Therefore
+your host's volume that is backing `/var/lib/kubernetes/pods` must have enough free space to serve all the concurrent runner pods that might be deployed onto your host at the same time.
+
+So, in case you see a job failure seemingly due to "disk full", it's very likely you need to reconfigure your host to have more free space.
+ +In case you can't rely on host's volume, consider using `RunnerSet` and backing the work directory with a ephemeral PV. + +Kubernetes 1.23 or greater provides the support for [generic ephemeral volumes](https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes), which is designed to support this exact use-case. It's defined in the Pod spec API so it isn't currently available for `RunnerDeployment`. `RunnerSet` is based on Kubernetes' `StatefulSet` which mostly embeds the Pod spec under `spec.template.spec`, so there you go. + +```yaml +kind: RunnerSet +metadata: + name: example +spec: + template: + spec: + containers: + - name: runner + volumeMounts: + - mountPath: /runner/_work + name: work + - name: docker + volumeMounts: + - mountPath: /runner/_work + name: work + volumes: + - name: work + ephemeral: + volumeClaimTemplate: + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "runner-work-dir" + resources: + requests: + storage: 10Gi +``` \ No newline at end of file diff --git a/docs/using-entrypoint-features.md b/docs/using-entrypoint-features.md new file mode 100644 index 0000000000..98ff04f7bd --- /dev/null +++ b/docs/using-entrypoint-features.md @@ -0,0 +1,69 @@ +# Using entrypoint features + +## Runner Entrypoint Features + +> Environment variable values must all be strings + +The entrypoint script is aware of a few environment variables for configuring features: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runnerdeployment +spec: + template: + spec: + env: + # Disable various runner entrypoint log levels + - name: LOG_DEBUG_DISABLED + value: "true" + - name: LOG_NOTICE_DISABLED + value: "true" + - name: LOG_WARNING_DISABLED + value: "true" + - name: LOG_ERROR_DISABLED + value: "true" + - name: LOG_SUCCESS_DISABLED + value: "true" + # Issues a sleep command at the start of the entrypoint + - name: STARTUP_DELAY_IN_SECONDS + value: "2" + # Specify the 
duration to wait for the docker daemon to be available
+      # The default duration of 120 seconds is sometimes too short
+      # to reliably wait for the docker daemon to start
+      # See https://github.com/actions/actions-runner-controller/issues/1804
+      - name: WAIT_FOR_DOCKER_SECONDS
+        value: "120"
+      # Disables the wait for the docker daemon to be available check
+      - name: DISABLE_WAIT_FOR_DOCKER
+        value: "true"
+      # Disables automatic runner updates
+      # WARNING : Upon a new version of the actions/runner software being released
+      # GitHub stops allocating jobs to runners on the previous version of the
+      # actions/runner software after 30 days.
+      - name: DISABLE_RUNNER_UPDATE
+        value: "true"
+```
+
+There are a few advanced envvars also that are available only for dind runners:
+
+```yaml
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: RunnerDeployment
+metadata:
+  name: example-runnerdeployment
+spec:
+  template:
+    spec:
+      dockerdWithinRunnerContainer: true
+      image: summerwind/actions-runner-dind
+      env:
+      # Sets the respective default-address-pools fields within dockerd daemon.json
+      # See https://github.com/actions/actions-runner-controller/pull/1971 for more information.
+      # Also see https://github.com/docker/docs/issues/8663 for the default base/size values in dockerd.
+ - name: DOCKER_DEFAULT_ADDRESS_POOL_BASE + value: "172.17.0.0/12" + - name: DOCKER_DEFAULT_ADDRESS_POOL_SIZE + value: "24" +``` \ No newline at end of file From 1fefa00b2b9dfd9d1211d9e3071100f802d41b9e Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 9 Jan 2023 13:51:41 +0100 Subject: [PATCH 003/561] Enable dependabot by creating dependabot.yml (#2128) --- .github/dependabot.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..e0871f93f4 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "gomod" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" From de0510530ec9dc9b594406164c7c59ce7729d9e6 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Wed, 11 Jan 2023 09:34:54 +0100 Subject: [PATCH 004/561] Update release workflows post-migration (#2120) * Fix to trigger extracted release workflows * Fix input descriptions * Add tool installation steps * Fix indentation * Fix token passing * Fix release tag name reference * Fix release tag name reference * Fix release tag name * Update publish-canary workflow * Update workflows * Fix target org * Add push to registries flag * Update publish-chart * Add job summary to publish-arc * Enhance summary message * Add publish canary workflow * Remove backticks * Fix variable * Fix index.yaml location and add job summary * Fix publish chart workflow * Enhance 
job summary for publish-chart * Enhance chart version identification and fix chart upload * Fix cr index * Fix cr index and add comments * Fix comment * Pin marketplace actions * Remove 3rd party action * Add comments, parametrise where needed * Add release process brief * Change target repo * Removing failsafe * Removing failsafe * Replace DOCKER_USER with DOCKERHUB_USERNAME --- .github/workflows/publish-arc.yaml | 78 ++++++++++++------- .github/workflows/publish-canary.yaml | 59 ++++++++------ .github/workflows/publish-chart.yaml | 96 ++++++++++++++++++++--- .github/workflows/runners.yaml | 107 ++++++-------------------- CONTRIBUTING.md | 8 +- 5 files changed, 202 insertions(+), 146 deletions(-) diff --git a/.github/workflows/publish-arc.yaml b/.github/workflows/publish-arc.yaml index bbc1f74ec4..5c60d5b645 100644 --- a/.github/workflows/publish-arc.yaml +++ b/.github/workflows/publish-arc.yaml @@ -1,21 +1,34 @@ name: Publish ARC +# Revert to https://github.com/actions-runner-controller/releases#releases +# for details on why we use this approach on: release: types: - published + workflow_dispatch: + inputs: + release_tag_name: + description: 'Tag name of the release to publish' + required: true + push_to_registries: + description: 'Push images to registries' + required: true + type: boolean + default: false -# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps permissions: contents: write packages: write +env: + TARGET_ORG: actions-runner-controller + TARGET_REPO: actions-runner-controller + jobs: release-controller: name: Release runs-on: ubuntu-latest - env: - DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }} steps: - name: Checkout uses: actions/checkout@v3 @@ -35,36 +48,45 @@ jobs: tar zxvf ghr_v0.13.0_linux_amd64.tar.gz sudo mv ghr_v0.13.0_linux_amd64/ghr /usr/local/bin - - name: Set version - run: echo "VERSION=$(cat ${GITHUB_EVENT_PATH} | jq -r '.release.tag_name')" >> $GITHUB_ENV - - name: Upload artifacts env: GITHUB_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} run: | make github-release - - name: Setup Docker Environment - uses: ./.github/actions/setup-docker-environment + - name: Get Token + id: get_workflow_token + uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db with: - username: ${{ env.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - ghcr_username: ${{ github.actor }} - ghcr_password: ${{ secrets.GITHUB_TOKEN }} + application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} + application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} + organization: ${{ env.TARGET_ORG }} - - name: Build and Push - uses: docker/build-push-action@v3 - with: - file: Dockerfile - platforms: linux/amd64,linux/arm64 - build-args: VERSION=${{ env.VERSION }} - push: true - tags: | - ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest - ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }} - ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}-${{ env.sha_short }} - ghcr.io/actions-runner-controller/actions-runner-controller:latest - ghcr.io/actions-runner-controller/actions-runner-controller:${{ env.VERSION }} - ghcr.io/actions-runner-controller/actions-runner-controller:${{ env.VERSION }}-${{ env.sha_short }} - cache-from: type=gha - cache-to: type=gha,mode=max + - name: Set release tag name + run: | + # Define the release tag name based on the event type + if [[ "${{ github.event_name }}" == "release" ]]; then + echo "RELEASE_TAG_NAME=$(cat ${GITHUB_EVENT_PATH} | jq -r '.release.tag_name')" >> $GITHUB_ENV + elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + echo "RELEASE_TAG_NAME=${{ github.event.inputs.release_tag_name }}" >> $GITHUB_ENV + fi + + - name: Trigger Build And Push Images To Registries + run: | + # Authenticate + gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }} + + # Trigger the workflow run + jq -n '{"event_type": "arc", "client_payload": 
{"release_tag_name": "${{ env.RELEASE_TAG_NAME }}", "push_to_registries": ${{ inputs.push_to_registries }}}}' \ + | gh api -X POST /repos/actions-runner-controller/releases/dispatches --input - + + - name: Job summary + run: | + echo "The [publish-arc](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-arc.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY + echo "- Release tag: ${{ env.RELEASE_TAG_NAME }}" >> $GITHUB_STEP_SUMMARY + echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Status:**" >> $GITHUB_STEP_SUMMARY + echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml)" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/publish-canary.yaml b/.github/workflows/publish-canary.yaml index 6d2a25d1df..3939030738 100644 --- a/.github/workflows/publish-canary.yaml +++ b/.github/workflows/publish-canary.yaml @@ -1,5 +1,7 @@ name: Publish Canary Image +# Revert to https://github.com/actions-runner-controller/releases#releases +# for details on why we use this approach on: push: branches: @@ -19,41 +21,50 @@ on: - 'LICENSE' - 'Makefile' +env: + # Safeguard to prevent pushing images to registeries after build + PUSH_TO_REGISTRIES: true + TARGET_ORG: actions-runner-controller + TARGET_REPO: actions-runner-controller + # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps permissions: contents: read - packages: write jobs: canary-build: name: Build and Publish Canary Image runs-on: ubuntu-latest env: - DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} steps: - name: Checkout uses: actions/checkout@v3 - - name: Setup Docker Environment - id: vars - uses: 
./.github/actions/setup-docker-environment - with: - username: ${{ env.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - ghcr_username: ${{ github.actor }} - ghcr_password: ${{ secrets.GITHUB_TOKEN }} - - # Considered unstable builds - # See Issue #285, PR #286, and PR #323 for more information - - name: Build and Push - uses: docker/build-push-action@v3 + - name: Get Token + id: get_workflow_token + uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db with: - file: Dockerfile - platforms: linux/amd64,linux/arm64 - build-args: VERSION=canary-${{ github.sha }} - push: true - tags: | - ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary - ghcr.io/${{ github.repository }}:canary - cache-from: type=gha,scope=arc-canary - cache-to: type=gha,mode=max,scope=arc-canary + application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} + application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} + organization: ${{ env.TARGET_ORG }} + + - name: Trigger Build And Push Images To Registries + run: | + # Authenticate + gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }} + + # Trigger the workflow run + jq -n '{"event_type": "canary", "client_payload": {"sha": "${{ github.sha }}", "push_to_registries": ${{ env.PUSH_TO_REGISTRIES }}}}' \ + | gh api -X POST /repos/actions-runner-controller/releases/dispatches --input - + + - name: Job summary + run: | + echo "The [publish-canary](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-canary.yaml) workflow has been triggered!" 
>> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY + echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY + echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Status:**" >> $GITHUB_STEP_SUMMARY + echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/publish-chart.yaml b/.github/workflows/publish-chart.yaml index 2d8b587dc1..b295394374 100644 --- a/.github/workflows/publish-chart.yaml +++ b/.github/workflows/publish-chart.yaml @@ -1,5 +1,7 @@ name: Publish Helm Chart +# Revert to https://github.com/actions-runner-controller/releases#releases +# for details on why we use this approach on: push: branches: @@ -86,20 +88,31 @@ jobs: if: steps.list-changed.outputs.changed == 'true' run: ct install --config charts/.ci/ct-config.yaml - # WARNING: This relies on the latest release being inat the top of the JSON from GitHub and a clean chart.yaml + # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml - name: Check if Chart Publish is Needed id: publish-chart-step run: | - CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/actions/actions-runner-controller/master/charts/actions-runner-controller/Chart.yaml) + CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml) NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2) - RELEASE_LIST=$(curl -fs https://api.github.com/repos/actions/actions-runner-controller/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4) + RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq 
.[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4) LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1) - echo "Chart version in master : $NEW_CHART_VERSION" - echo "Latest release chart version : $LATEST_RELEASED_CHART_VERSION" + echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV + echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION ]]; then - echo "::set-output name=publish::true" + echo "publish=true" >> $GITHUB_OUTPUT + else + echo "publish=false" >> $GITHUB_OUTPUT fi + - name: Job summary + run: | + echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Status:**" >> $GITHUB_STEP_SUMMARY + echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY + echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY + echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY + publish-chart: if: needs.lint-chart.outputs.publish-chart == 'true' needs: lint-chart @@ -107,8 +120,11 @@ jobs: runs-on: ubuntu-latest permissions: contents: write # for helm/chart-releaser-action to push chart release and create a release + env: + CHART_TARGET_ORG: actions-runner-controller + CHART_TARGET_REPO: actions-runner-controller.github.io + CHART_TARGET_BRANCH: main - steps: - name: Checkout uses: actions/checkout@v3 @@ -120,8 +136,68 @@ jobs: git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - - name: Run chart-releaser + - name: Get Token + id: get_workflow_token + uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db + with: + application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} + application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} + organization: ${{ env.CHART_TARGET_ORG }} + + - name: 
Install chart-releaser uses: helm/chart-releaser-action@v1.4.1 - env: - CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + with: + install_only: true + install_dir: ${{ github.workspace }}/bin + + - name: Package and upload release assets + run: | + cr package \ + ${{ github.workspace }}/charts/actions-runner-controller/ \ + --package-path .cr-release-packages + cr upload \ + --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ + --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ + --package-path .cr-release-packages \ + --token ${{ secrets.GITHUB_TOKEN }} + + - name: Generate updated index.yaml + run: | + cr index \ + --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ + --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ + --index-path ${{ github.workspace }}/index.yaml \ + --pages-branch 'gh-pages' \ + --pages-index-path 'index.yaml' + + # Chart Release was never intended to publish to a different repo + # this workaround is intended to move the index.yaml to the target repo + # where the github pages are hosted + - name: Checkout pages repository + uses: actions/checkout@v3 + with: + repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }} + path: ${{ env.CHART_TARGET_REPO }} + ref: ${{ env.CHART_TARGET_BRANCH }} + token: ${{ steps.get_workflow_token.outputs.token }} + + - name: Copy index.yaml + run: | + cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml + + - name: Commit and push + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + git add . 
+ git commit -m "Update index.yaml" + git push + working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }} + + - name: Job summary + run: | + echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Status:**" >> $GITHUB_STEP_SUMMARY + echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/runners.yaml b/.github/workflows/runners.yaml index a213cef051..4780953c11 100644 --- a/.github/workflows/runners.yaml +++ b/.github/workflows/runners.yaml @@ -1,5 +1,7 @@ name: Runners +# Revert to https://github.com/actions-runner-controller/releases#releases +# for details on why we use this approach on: pull_request: types: @@ -25,97 +27,36 @@ on: - '!**.md' env: + # Safeguard to prevent pushing images to registeries after build + PUSH_TO_REGISTRIES: true + TARGET_ORG: actions-runner-controller + TARGET_WORKFLOW: release-runners.yaml RUNNER_VERSION: 2.299.1 DOCKER_VERSION: 20.10.21 RUNNER_CONTAINER_HOOKS_VERSION: 0.1.3 - DOCKERHUB_USERNAME: summerwind jobs: build-runners: - name: Build ${{ matrix.name }}-${{ matrix.os-name }}-${{ matrix.os-version }} + name: Trigger Build and Push of Runner Images runs-on: ubuntu-latest - permissions: - packages: write - contents: read - strategy: - fail-fast: false - matrix: - include: - - name: actions-runner - os-name: ubuntu - os-version: 20.04 - latest: "true" - - name: actions-runner - os-name: ubuntu - os-version: 22.04 - latest: "false" - - name: actions-runner-dind - os-name: ubuntu - os-version: 20.04 - latest: "true" - - name: actions-runner-dind - os-name: ubuntu - os-version: 22.04 - latest: "false" - - name: actions-runner-dind-rootless - os-name: ubuntu - os-version: 20.04 - latest: "true" - - name: actions-runner-dind-rootless - os-name: ubuntu - os-version: 22.04 - latest: "false" - steps: - - name: Checkout - uses: 
actions/checkout@v3 - - - name: Setup Docker Environment - uses: ./.github/actions/setup-docker-environment + - name: Get Token + id: get_workflow_token + uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db with: - username: ${{ env.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - ghcr_username: ${{ github.actor }} - ghcr_password: ${{ secrets.GITHUB_TOKEN }} + application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} + application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} + organization: ${{ env.TARGET_ORG }} - - name: Build and Push Versioned Tags - uses: docker/build-push-action@v3 - with: - context: ./runner - file: ./runner/${{ matrix.name }}.${{ matrix.os-name }}-${{ matrix.os-version }}.dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }} - build-args: | - RUNNER_VERSION=${{ env.RUNNER_VERSION }} - DOCKER_VERSION=${{ env.DOCKER_VERSION }} - RUNNER_CONTAINER_HOOKS_VERSION=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }} - tags: | - ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }} - ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}-${{ env.sha_short }} - ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:${{ matrix.os-name }}-${{ matrix.os-version }} - ghcr.io/${{ github.repository }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }} - ghcr.io/${{ github.repository }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}-${{ env.sha_short }} - ghcr.io/${{ github.repository }}/${{ matrix.name }}:${{ matrix.os-name }}-${{ matrix.os-version }} - cache-from: type=gha,scope=build-${{ matrix.name }}-${{ matrix.os-name }}-${{ matrix.os-version }} - cache-to: type=gha,mode=max,scope=build-${{ matrix.name }}-${{ 
matrix.os-name }}-${{ matrix.os-version }} + - name: Trigger Build And Push Runner Images To Registries + run: | + # Authenticate + gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }} - # NOTE : Only to be used on the 20.04 image until we remove the latest tag entirely - # at which point this step needs to be deleted - # https://github.com/actions/actions-runner-controller/issues/2056 - - name: Build and Push Latest Tags - if: ${{ matrix.latest == 'true' }} - uses: docker/build-push-action@v3 - with: - context: ./runner - file: ./runner/${{ matrix.name }}.${{ matrix.os-name }}-${{ matrix.os-version }}.dockerfile - platforms: linux/amd64,linux/arm64 - push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }} - build-args: | - RUNNER_VERSION=${{ env.RUNNER_VERSION }} - DOCKER_VERSION=${{ env.DOCKER_VERSION }} - RUNNER_CONTAINER_HOOKS_VERSION=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }} - tags: | - ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest - ghcr.io/${{ github.repository }}/${{ matrix.name }}:latest - cache-from: type=gha,scope=build-${{ matrix.name }}-${{ matrix.os-name }}-${{ matrix.os-version }} - cache-to: type=gha,mode=max,scope=build-${{ matrix.name }}-${{ matrix.os-name }}-${{ matrix.os-version }} + # Trigger the workflow run + gh workflow run ${{ env.TARGET_WORKFLOW }} -R ${{ env.TARGET_ORG }}/releases \ + -f runner_version=${{ env.RUNNER_VERSION }} \ + -f docker_version=${{ env.DOCKER_VERSION }} \ + -f runner_container_hooks_version=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }} \ + -f sha='${{ github.sha }}' \ + -f push_to_registries=${{ env.PUSH_TO_REGISTRIES }} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e65039ab11..d236d779ef 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -210,4 +210,10 @@ The process would look like the below: - Run `NAME=$DOCKER_USER/actions-runner-controller VERSION=canary make docker-build docker-push` for a custom container image build - Update your 
actions-runner-controller's controller-manager deployment to use the new image, `$DOCKER_USER/actions-runner-controller:canary` -Please also note that you need to replace `$DOCKER_USER` with your own DockerHub account name. \ No newline at end of file +Please also note that you need to replace `$DOCKER_USER` with your own DockerHub account name. + +## Release process + +Only the maintainers can release a new version of actions-runner-controller, publish a new version of the helm charts, and runner images. + +All release workflows have been moved to [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases) since the packages are owned by the former organization. From fc402abcc300a4dedb70a58e6e206b123baf501d Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Wed, 11 Jan 2023 14:29:32 +0100 Subject: [PATCH 005/561] Update runner version to 2.300.2 (#2141) * Update runner version to 2.300.2 * Bump up runner and container hooks versions * Bump up runner version * Bump up runner and container hooks versions * Update actions-runner-dind-rootless.ubuntu-22.04.dockerfile * Update actions-runner-dind.ubuntu-20.04.dockerfile * Update actions-runner-dind.ubuntu-22.04.dockerfile * Update actions-runner.ubuntu-20.04.dockerfile * Update actions-runner.ubuntu-22.04.dockerfile * Bump up runner versions * Bump up container hooks versions --- .github/workflows/runners.yaml | 4 ++-- Makefile | 2 +- runner/Makefile | 4 ++-- runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile | 4 ++-- runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile | 4 ++-- runner/actions-runner-dind.ubuntu-20.04.dockerfile | 4 ++-- runner/actions-runner-dind.ubuntu-22.04.dockerfile | 4 ++-- runner/actions-runner.ubuntu-20.04.dockerfile | 4 ++-- runner/actions-runner.ubuntu-22.04.dockerfile | 4 ++-- test/e2e/e2e_test.go | 2 +- 10 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/runners.yaml 
b/.github/workflows/runners.yaml index 4780953c11..3cb57a2b40 100644 --- a/.github/workflows/runners.yaml +++ b/.github/workflows/runners.yaml @@ -31,9 +31,9 @@ env: PUSH_TO_REGISTRIES: true TARGET_ORG: actions-runner-controller TARGET_WORKFLOW: release-runners.yaml - RUNNER_VERSION: 2.299.1 + RUNNER_VERSION: 2.300.2 DOCKER_VERSION: 20.10.21 - RUNNER_CONTAINER_HOOKS_VERSION: 0.1.3 + RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0 jobs: build-runners: diff --git a/Makefile b/Makefile index db064b7a55..4c4389f4fa 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ else endif DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1) VERSION ?= dev -RUNNER_VERSION ?= 2.299.1 +RUNNER_VERSION ?= 2.300.2 TARGETPLATFORM ?= $(shell arch) RUNNER_NAME ?= ${DOCKER_USER}/actions-runner RUNNER_TAG ?= ${VERSION} diff --git a/runner/Makefile b/runner/Makefile index 3dffc9a08b..baafc49dbd 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -6,8 +6,8 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) -RUNNER_VERSION ?= 2.299.1 -RUNNER_CONTAINER_HOOKS_VERSION ?= 0.1.3 +RUNNER_VERSION ?= 2.300.2 +RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0 DOCKER_VERSION ?= 20.10.21 # default list of platforms for which multiarch image is built diff --git a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile index 437f6b5098..faddc4edd7 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile @@ -1,8 +1,8 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.299.1 -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.2 +ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ENV CHANNEL=stable ARG DOCKER_COMPOSE_VERSION=v2.6.0 diff --git a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile 
b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile index de63d3cc00..72ca96ba08 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile @@ -1,8 +1,8 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.299.1 -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.3 +ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ENV CHANNEL=stable ARG DOCKER_COMPOSE_VERSION=v2.12.2 diff --git a/runner/actions-runner-dind.ubuntu-20.04.dockerfile b/runner/actions-runner-dind.ubuntu-20.04.dockerfile index 8e57d97b58..6aab2d60c7 100644 --- a/runner/actions-runner-dind.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-20.04.dockerfile @@ -1,8 +1,8 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.299.1 -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.2 +ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.18 diff --git a/runner/actions-runner-dind.ubuntu-22.04.dockerfile b/runner/actions-runner-dind.ubuntu-22.04.dockerfile index 39bd2422a0..2170d10a7c 100644 --- a/runner/actions-runner-dind.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-22.04.dockerfile @@ -1,8 +1,8 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.299.1 -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.3 +ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.21 diff --git a/runner/actions-runner.ubuntu-20.04.dockerfile b/runner/actions-runner.ubuntu-20.04.dockerfile index ab45c29ade..94ceb531af 100644 --- a/runner/actions-runner.ubuntu-20.04.dockerfile +++ b/runner/actions-runner.ubuntu-20.04.dockerfile @@ -1,8 +1,8 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.299.1 -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.2 +ARG 
RUNNER_VERSION=2.300.2 +ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.18 diff --git a/runner/actions-runner.ubuntu-22.04.dockerfile b/runner/actions-runner.ubuntu-22.04.dockerfile index b9fc36af80..3b998b76cf 100644 --- a/runner/actions-runner.ubuntu-22.04.dockerfile +++ b/runner/actions-runner.ubuntu-22.04.dockerfile @@ -1,8 +1,8 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.299.1 -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.3 +ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.21 diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index d21f3614ce..a5ba4a75fb 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -41,7 +41,7 @@ var ( testResultCMNamePrefix = "test-result-" - RunnerVersion = "2.299.1" + RunnerVersion = "2.300.2" ) // If you're willing to run this test via VS Code "run test" or "debug test", From 5d56485eee9c06fb5a6a94dc8fb257016a8c6ee5 Mon Sep 17 00:00:00 2001 From: Siara <108543037+SiaraMist@users.noreply.github.com> Date: Thu, 12 Jan 2023 02:37:58 -0800 Subject: [PATCH 006/561] Fix broken links in docs (#2144) --- CONTRIBUTING.md | 2 +- README.md | 2 +- docs/about-arc.md | 2 +- docs/quickstart.md | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d236d779ef..2399a8ce11 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -201,7 +201,7 @@ The maintainers will manage releases and publishing new charts. We always appreciate your help in testing open pull requests by deploying custom builds of actions-runner-controller onto your own environment, so that we are extra sure we didn't break anything. 
-It is especially true when the pull request is about GitHub Enterprise, both GHEC and GHES, as [maintainers don't have GitHub Enterprise environments for testing](docs/detailed-docs.md#github-enterprise-support). +It is especially true when the pull request is about GitHub Enterprise, both GHEC and GHES, as [maintainers don't have GitHub Enterprise environments for testing](docs/about-arc.md#github-enterprise-support). The process would look like the below: diff --git a/README.md b/README.md index dd14c3a56b..338a74057e 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ The documentation is kept inline with master@HEAD, we do our best to highlight a ## Getting Started To give ARC a try with just a handful of commands, Please refer to the [Quickstart guide](/docs/quickstart.md). -For an overview of ARC, please refer to [ARC Overview](https://github.com/actions/actions-runner-controller/blob/master/docs/Actions-Runner-Controller-Overview.md) +For an overview of ARC, please refer to [About ARC](https://github.com/actions/actions-runner-controller/blob/master/docs/about-arc.md) For more information, please refer to detailed documentation below! diff --git a/docs/about-arc.md b/docs/about-arc.md index ce621d04bc..532d29f006 100644 --- a/docs/about-arc.md +++ b/docs/about-arc.md @@ -122,7 +122,7 @@ spec: scaleDownFactor: '0.5' ``` -For more details - please see "[Pull Driven Scaling](detailed-docs.md#pull-driven-scaling)." +For more details - please see "[Pull Driven Scaling](automatically-scaling-runners.md#pull-driven-scaling)." *The period between polls is defined by the controller's `--sync-period` flag. 
If this flag isn't provided then the controller defaults to a sync period of `1m`, this can be configured in seconds or minutes.* diff --git a/docs/quickstart.md b/docs/quickstart.md index 04eb567e5f..013cfc5f2c 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -12,7 +12,7 @@ With ARC you can : ## Overview -For an overview of ARC, please refer to "[ARC Overview](https://github.com/actions/actions-runner-controller/blob/master/docs/Actions-Runner-Controller-Overview.md)." +For an overview of ARC, please refer to "[About ARC](https://github.com/actions/actions-runner-controller/blob/master/docs/about-arc.md)." ## Getting Started @@ -140,7 +140,7 @@ There is also a quick start guide to get started on Actions, For more informatio ## Learn more -For more detailed documentation, please refer to "[Detailed Documentation](https://github.com/actions/actions-runner-controller/blob/master/docs/detailed-docs.md)." +For more detailed documentation, please refer to "[Actions Runner Controller Documentation](https://github.com/actions/actions-runner-controller/blob/master/README.md#documentation)." ## Contributing From 1202bf93e6d5b4fcfb7f992a1dd1f18da7f94894 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Fri, 13 Jan 2023 07:14:36 +0900 Subject: [PATCH 007/561] Fix various golangci-lint errors (#2147) that we introduced via controller-runtime upgrade and via the removal of legacy pull-based scale triggers (#2001). 
--- .../horizontal_runner_autoscaler_webhook.go | 85 ------------------- .../integration_test.go | 3 - .../actions.summerwind.net/suite_test.go | 15 ++-- 3 files changed, 6 insertions(+), 97 deletions(-) diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go index 093e91e505..e0cee795aa 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "net/http" - "strings" "sync" "time" @@ -333,24 +332,6 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx con return hras, nil } -func matchTriggerConditionAgainstEvent(types []string, eventAction *string) bool { - if len(types) == 0 { - return true - } - - if eventAction == nil { - return false - } - - for _, tpe := range types { - if tpe == *eventAction { - return true - } - } - - return false -} - type ScaleTarget struct { v1alpha1.HorizontalRunnerAutoscaler v1alpha1.ScaleUpTrigger @@ -358,72 +339,6 @@ type ScaleTarget struct { log *logr.Logger } -func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) searchScaleTargets(hras []v1alpha1.HorizontalRunnerAutoscaler, f func(v1alpha1.ScaleUpTrigger) bool) []ScaleTarget { - var matched []ScaleTarget - - for _, hra := range hras { - if !hra.ObjectMeta.DeletionTimestamp.IsZero() { - continue - } - - for _, scaleUpTrigger := range hra.Spec.ScaleUpTriggers { - if !f(scaleUpTrigger) { - continue - } - - matched = append(matched, ScaleTarget{ - HorizontalRunnerAutoscaler: hra, - ScaleUpTrigger: scaleUpTrigger, - }) - } - } - - return matched -} - -func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx context.Context, name string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) { - hras, err := autoscaler.findHRAsByKey(ctx, name) - if err != nil { - return nil, 
err - } - - autoscaler.Log.V(1).Info(fmt.Sprintf("Found %d HRAs by key", len(hras)), "key", name) - - targets := autoscaler.searchScaleTargets(hras, f) - - n := len(targets) - - if n == 0 { - return nil, nil - } - - if n > 1 { - var scaleTargetIDs []string - - for _, t := range targets { - scaleTargetIDs = append(scaleTargetIDs, t.HorizontalRunnerAutoscaler.Name) - } - - autoscaler.Log.Info( - "Found too many scale targets: "+ - "It must be exactly one to avoid ambiguity. "+ - "Either set Namespace for the webhook-based autoscaler to let it only find HRAs in the namespace, "+ - "or update Repository, Organization, or Enterprise fields in your RunnerDeployment resources to fix the ambiguity.", - "scaleTargets", strings.Join(scaleTargetIDs, ",")) - - return nil, nil - } - - return &targets[0], nil -} - -func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx context.Context, log logr.Logger, repo, owner, ownerType, enterprise string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) { - scaleTarget := func(value string) (*ScaleTarget, error) { - return autoscaler.getScaleTarget(ctx, value, f) - } - return autoscaler.getScaleUpTargetWithFunction(ctx, log, repo, owner, ownerType, enterprise, scaleTarget) -} - func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getJobScaleUpTargetForRepoOrOrg( ctx context.Context, log logr.Logger, repo, owner, ownerType, enterprise string, labels []string, ) (*ScaleTarget, error) { diff --git a/controllers/actions.summerwind.net/integration_test.go b/controllers/actions.summerwind.net/integration_test.go index b6dd6e32f7..bf9d8607cf 100644 --- a/controllers/actions.summerwind.net/integration_test.go +++ b/controllers/actions.summerwind.net/integration_test.go @@ -40,9 +40,6 @@ var ( workflowRunsFor3Replicas = `{"total_count": 5, "workflow_runs":[{"status":"queued"}, {"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"` workflowRunsFor3Replicas_queued 
= `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"queued"}]}"` workflowRunsFor3Replicas_in_progress = `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"` - workflowRunsFor1Replicas = `{"total_count": 6, "workflow_runs":[{"status":"queued"}, {"status":"completed"}, {"status":"completed"}, {"status":"completed"}, {"status":"completed"}]}"` - workflowRunsFor1Replicas_queued = `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"` - workflowRunsFor1Replicas_in_progress = `{"total_count": 0, "workflow_runs":[]}"` ) // SetupIntegrationTest will set up a testing environment. diff --git a/controllers/actions.summerwind.net/suite_test.go b/controllers/actions.summerwind.net/suite_test.go index 442f28d17f..6918bd91ff 100644 --- a/controllers/actions.summerwind.net/suite_test.go +++ b/controllers/actions.summerwind.net/suite_test.go @@ -57,19 +57,16 @@ func TestAPIs(t *testing.T) { var _ = BeforeSuite(func(done Done) { logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) - var apiServerFlags []string - - apiServerFlags = append(apiServerFlags, envtest.DefaultKubeAPIServerFlags...) 
- // Avoids the following error: - // 2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller Reconciler error {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"} - apiServerFlags = append(apiServerFlags, "--allow-privileged=true") - By("bootstrapping test environment") testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("../..", "config", "crd", "bases")}, - KubeAPIServerFlags: apiServerFlags, + CRDDirectoryPaths: []string{filepath.Join("../..", "config", "crd", "bases")}, } + // Avoids the following error: + // 2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller Reconciler error {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"} + testEnv.ControlPlane.GetAPIServer().Configure(). + Append("allow-privileged", "true") + var err error cfg, err = testEnv.Start() Expect(err).ToNot(HaveOccurred()) From 11773b81673e0386b177977fdeabe759a91e9b17 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Fri, 13 Jan 2023 07:15:05 +0900 Subject: [PATCH 008/561] fix(e2e): Use the correct full chart name in test (#2146) The whole E2E test breaks due to the invalid chart name without this fix. 
--- test/e2e/e2e_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index a5ba4a75fb..f2b82bbfd4 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -106,7 +106,7 @@ func TestE2E(t *testing.T) { label: "stable", controller: "summerwind/actions-runner-controller", controllerVer: "v0.25.2", - chart: "actions/actions-runner-controller", + chart: "actions-runner-controller/actions-runner-controller", // 0.20.2 accidentally added support for runner-status-update which isn't supported by ARC 0.25.2. // With some chart values, the controller end up with crashlooping with `flag provided but not defined: -runner-status-update-hook`. chartVer: "0.20.1", From 91f77bd2a1cfb49159f4235046e7ea60be191a0c Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Fri, 13 Jan 2023 07:15:37 +0900 Subject: [PATCH 009/561] fix(e2e): Make runner graceful shutdown checker cancellable (#2145) So that the whole test run can be stopped immediately with a failure, without failing until the verify timeout. 
--- test/e2e/e2e_test.go | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index f2b82bbfd4..9d88d2d0dc 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -222,12 +222,26 @@ func TestE2E(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) go func() { + var cancelled bool + defer func() { + if !cancelled { + t.Logf("Stopping the continuous rolling-update of runners due to error(s)") + } + cancel() + }() + for i := 1; ; i++ { + if t.Failed() { + cancelled = true + return + } + select { case _, ok := <-ctx.Done(): if !ok { t.Logf("Stopping the continuous rolling-update of runners") } + cancelled = true default: time.Sleep(60 * time.Second) @@ -242,7 +256,7 @@ func TestE2E(t *testing.T) { }) t.Run("Verify workflow run result", func(t *testing.T) { - env.verifyActionsWorkflowRun(t, testID) + env.verifyActionsWorkflowRun(t, ctx, testID) }) }) @@ -322,12 +336,26 @@ func TestE2E(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) go func() { + var cancelled bool + defer func() { + if !cancelled { + t.Logf("Stopping the continuous rolling-update of runners due to error(s)") + } + cancel() + }() + for i := 1; ; i++ { + if t.Failed() { + cancelled = true + return + } + select { case _, ok := <-ctx.Done(): if !ok { t.Logf("Stopping the continuous rolling-update of runners") } + cancelled = true return default: time.Sleep(10 * time.Second) @@ -347,7 +375,7 @@ func TestE2E(t *testing.T) { }) t.Run("Verify workflow run result", func(t *testing.T) { - env.verifyActionsWorkflowRun(t, testID) + env.verifyActionsWorkflowRun(t, ctx, testID) }) }) @@ -887,10 +915,10 @@ func (e *env) testJobs(testID string) []job { return createTestJobs(testID, testResultCMNamePrefix, 6) } -func (e *env) verifyActionsWorkflowRun(t *testing.T, testID string) { +func (e *env) verifyActionsWorkflowRun(t *testing.T, ctx context.Context, testID string) { 
t.Helper() - verifyActionsWorkflowRun(t, e.Env, e.testJobs(testID), e.verifyTimeout(), e.getKubectlConfig()) + verifyActionsWorkflowRun(t, ctx, e.Env, e.testJobs(testID), e.verifyTimeout(), e.getKubectlConfig()) } func (e *env) verifyTimeout() time.Duration { @@ -1162,7 +1190,7 @@ kubectl create cm %s$id --from-literal=status=ok } } -func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job, timeout time.Duration, cmCfg testing.KubectlConfig) { +func verifyActionsWorkflowRun(t *testing.T, ctx context.Context, env *testing.Env, testJobs []job, timeout time.Duration, cmCfg testing.KubectlConfig) { t.Helper() var expected []string @@ -1171,7 +1199,7 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job, ti expected = append(expected, "ok") } - gomega.NewGomegaWithT(t).Eventually(func() ([]string, error) { + gomega.NewGomegaWithT(t).Eventually(ctx, func() ([]string, error) { var results []string var errs []error From 527d57779e58469b2a7cd499fa2a25130a1152b9 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Thu, 12 Jan 2023 23:24:33 +0100 Subject: [PATCH 010/561] Add job summary to the runners release workflow (#2140) * Add and update job summaries * Fix workflow reference links * Fix / deny push to registries on PR * Rename the workflow to match the releases repo --- .../{runners.yaml => release-runners.yaml} | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) rename .github/workflows/{runners.yaml => release-runners.yaml} (61%) diff --git a/.github/workflows/runners.yaml b/.github/workflows/release-runners.yaml similarity index 61% rename from .github/workflows/runners.yaml rename to .github/workflows/release-runners.yaml index 3cb57a2b40..6d27b58ddd 100644 --- a/.github/workflows/runners.yaml +++ b/.github/workflows/release-runners.yaml @@ -3,18 +3,6 @@ name: Runners # Revert to https://github.com/actions-runner-controller/releases#releases # for details on 
why we use this approach on: - pull_request: - types: - - opened - - synchronize - - reopened - branches: - - 'master' - paths: - - 'runner/**' - - '!runner/Makefile' - - '.github/workflows/runners.yaml' - - '!**.md' # We must do a trigger on a push: instead of a types: closed so GitHub Secrets # are available to the workflow run push: @@ -60,3 +48,17 @@ jobs: -f runner_container_hooks_version=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }} \ -f sha='${{ github.sha }}' \ -f push_to_registries=${{ env.PUSH_TO_REGISTRIES }} + + - name: Job summary + run: | + echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY + echo "- runner_version: ${{ env.RUNNER_VERSION }}" >> $GITHUB_STEP_SUMMARY + echo "- docker_version: ${{ env.DOCKER_VERSION }}" >> $GITHUB_STEP_SUMMARY + echo "- runner_container_hooks_version: ${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY + echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY + echo "- push_to_registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Status:**" >> $GITHUB_STEP_SUMMARY + echo "[https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml)" >> $GITHUB_STEP_SUMMARY From d8cd5a8ad15bbbe9f4ad8f20b0a3de171fe60fd9 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Thu, 12 Jan 2023 18:24:11 -0500 Subject: [PATCH 011/561] Update controller package names to match the owning API group name (#2150) * Update controller package names to match the owning API group name * feedback. 
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- cmd/githubwebhookserver/main.go | 6 +- .../actions.summerwind.net/autoscaling.go | 2 +- .../autoscaling_test.go | 2 +- .../actions.summerwind.net/constants.go | 2 +- ...orizontal_runner_autoscaler_batch_scale.go | 2 +- .../horizontal_runner_autoscaler_webhook.go | 2 +- ...rizontal_runner_autoscaler_webhook_test.go | 2 +- ...zontal_runner_autoscaler_webhook_worker.go | 2 +- ...l_runner_autoscaler_webhook_worker_test.go | 2 +- .../horizontalrunnerautoscaler_controller.go | 2 +- .../integration_test.go | 2 +- .../multi_githubclient.go | 2 +- .../new_runner_pod_test.go | 2 +- .../persistent_volume_claim_controller.go | 2 +- .../persistent_volume_controller.go | 2 +- .../pod_runner_token_injector.go | 2 +- .../runner_controller.go | 2 +- .../runner_graceful_stop.go | 2 +- .../actions.summerwind.net/runner_pod.go | 2 +- .../runner_pod_controller.go | 2 +- .../runner_pod_owner.go | 2 +- .../runnerdeployment_controller.go | 2 +- .../runnerdeployment_controller_test.go | 2 +- .../runnerreplicaset_controller.go | 2 +- .../runnerreplicaset_controller_test.go | 2 +- .../runnerset_controller.go | 2 +- .../actions.summerwind.net/schedule.go | 2 +- .../actions.summerwind.net/schedule_test.go | 2 +- .../actions.summerwind.net/suite_test.go | 2 +- .../actions.summerwind.net/sync_volumes.go | 2 +- .../testresourcereader.go | 2 +- .../testresourcereader_test.go | 2 +- controllers/actions.summerwind.net/utils.go | 2 +- .../actions.summerwind.net/utils_test.go | 2 +- go.mod | 1 - go.sum | 374 +----------------- 36 files changed, 37 insertions(+), 410 deletions(-) diff --git a/cmd/githubwebhookserver/main.go b/cmd/githubwebhookserver/main.go index ef1ca84cd4..2845772032 100644 --- a/cmd/githubwebhookserver/main.go +++ b/cmd/githubwebhookserver/main.go @@ -27,7 +27,7 @@ import ( "time" actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1" - 
"github.com/actions/actions-runner-controller/controllers/actions.summerwind.net" + actionssummerwindnet "github.com/actions/actions-runner-controller/controllers/actions.summerwind.net" "github.com/actions/actions-runner-controller/github" "github.com/actions/actions-runner-controller/logging" @@ -90,7 +90,7 @@ func main() { flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&watchNamespace, "watch-namespace", "", "The namespace to watch for HorizontalRunnerAutoscaler's to scale on Webhook. Set to empty for letting it watch for all namespaces.") flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`) - flag.IntVar(&queueLimit, "queue-limit", controllers.DefaultQueueLimit, `The maximum length of the scale operation queue. The scale opration is enqueued per every matching webhook event, and the server returns a 500 HTTP status when the queue was already full on enqueue attempt.`) + flag.IntVar(&queueLimit, "queue-limit", actionssummerwindnet.DefaultQueueLimit, `The maximum length of the scale operation queue. 
The scale opration is enqueued per every matching webhook event, and the server returns a 500 HTTP status when the queue was already full on enqueue attempt.`) flag.StringVar(&webhookSecretToken, "github-webhook-secret-token", "", "The personal access token of GitHub.") flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.") flag.Int64Var(&c.AppID, "github-app-id", c.AppID, "The application ID of GitHub App.") @@ -160,7 +160,7 @@ func main() { os.Exit(1) } - hraGitHubWebhook := &controllers.HorizontalRunnerAutoscalerGitHubWebhook{ + hraGitHubWebhook := &actionssummerwindnet.HorizontalRunnerAutoscalerGitHubWebhook{ Name: "webhookbasedautoscaler", Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("webhookbasedautoscaler"), diff --git a/controllers/actions.summerwind.net/autoscaling.go b/controllers/actions.summerwind.net/autoscaling.go index 04c950a2e1..906bdce2c7 100644 --- a/controllers/actions.summerwind.net/autoscaling.go +++ b/controllers/actions.summerwind.net/autoscaling.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/autoscaling_test.go b/controllers/actions.summerwind.net/autoscaling_test.go index 0d69de1289..ec0ac79ae2 100644 --- a/controllers/actions.summerwind.net/autoscaling_test.go +++ b/controllers/actions.summerwind.net/autoscaling_test.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/constants.go b/controllers/actions.summerwind.net/constants.go index 5c416e01b1..9f0947f5b5 100644 --- a/controllers/actions.summerwind.net/constants.go +++ b/controllers/actions.summerwind.net/constants.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import "time" diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go 
b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go index 9c1abbc237..d3914f10db 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go index e0cee795aa..e3710924d2 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go index 0d081231d5..1675b88be4 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "bytes" diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_worker.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_worker.go index f674f458cc..7a07f6fadc 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_worker.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_worker.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_worker_test.go 
b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_worker_test.go index e2bf0cd65b..42ec960bbd 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_worker_test.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_worker_test.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/horizontalrunnerautoscaler_controller.go b/controllers/actions.summerwind.net/horizontalrunnerautoscaler_controller.go index 08afd5c306..0aa5a7b6b5 100644 --- a/controllers/actions.summerwind.net/horizontalrunnerautoscaler_controller.go +++ b/controllers/actions.summerwind.net/horizontalrunnerautoscaler_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/integration_test.go b/controllers/actions.summerwind.net/integration_test.go index bf9d8607cf..c303eac2e0 100644 --- a/controllers/actions.summerwind.net/integration_test.go +++ b/controllers/actions.summerwind.net/integration_test.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/multi_githubclient.go b/controllers/actions.summerwind.net/multi_githubclient.go index c3ec9b9e89..912a410ad5 100644 --- a/controllers/actions.summerwind.net/multi_githubclient.go +++ b/controllers/actions.summerwind.net/multi_githubclient.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/new_runner_pod_test.go b/controllers/actions.summerwind.net/new_runner_pod_test.go index 03914b0e38..5529ca42e7 100644 --- a/controllers/actions.summerwind.net/new_runner_pod_test.go +++ b/controllers/actions.summerwind.net/new_runner_pod_test.go @@ 
-1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "testing" diff --git a/controllers/actions.summerwind.net/persistent_volume_claim_controller.go b/controllers/actions.summerwind.net/persistent_volume_claim_controller.go index 0405e77191..49cec2a8d4 100644 --- a/controllers/actions.summerwind.net/persistent_volume_claim_controller.go +++ b/controllers/actions.summerwind.net/persistent_volume_claim_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/persistent_volume_controller.go b/controllers/actions.summerwind.net/persistent_volume_controller.go index dff8aa8c5e..ca13fec07b 100644 --- a/controllers/actions.summerwind.net/persistent_volume_controller.go +++ b/controllers/actions.summerwind.net/persistent_volume_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/pod_runner_token_injector.go b/controllers/actions.summerwind.net/pod_runner_token_injector.go index 8beae0f8ff..45dfe827b8 100644 --- a/controllers/actions.summerwind.net/pod_runner_token_injector.go +++ b/controllers/actions.summerwind.net/pod_runner_token_injector.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/runner_controller.go b/controllers/actions.summerwind.net/runner_controller.go index a3e0af8558..6c0749780d 100644 --- a/controllers/actions.summerwind.net/runner_controller.go +++ b/controllers/actions.summerwind.net/runner_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/runner_graceful_stop.go b/controllers/actions.summerwind.net/runner_graceful_stop.go index c761b435bb..a3cdb43f41 100644 --- a/controllers/actions.summerwind.net/runner_graceful_stop.go +++ b/controllers/actions.summerwind.net/runner_graceful_stop.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/runner_pod.go b/controllers/actions.summerwind.net/runner_pod.go index 9bac0e1822..eb247e5884 100644 --- a/controllers/actions.summerwind.net/runner_pod.go +++ b/controllers/actions.summerwind.net/runner_pod.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import corev1 "k8s.io/api/core/v1" diff --git a/controllers/actions.summerwind.net/runner_pod_controller.go b/controllers/actions.summerwind.net/runner_pod_controller.go index d9ac27a080..02aeb66a11 100644 --- a/controllers/actions.summerwind.net/runner_pod_controller.go +++ b/controllers/actions.summerwind.net/runner_pod_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/runner_pod_owner.go b/controllers/actions.summerwind.net/runner_pod_owner.go index 51567a2d4b..77cd8e3b4e 100644 --- a/controllers/actions.summerwind.net/runner_pod_owner.go +++ b/controllers/actions.summerwind.net/runner_pod_owner.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/runnerdeployment_controller.go b/controllers/actions.summerwind.net/runnerdeployment_controller.go index 2af8eba4b3..7753b640e0 100644 --- a/controllers/actions.summerwind.net/runnerdeployment_controller.go +++ b/controllers/actions.summerwind.net/runnerdeployment_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/runnerdeployment_controller_test.go b/controllers/actions.summerwind.net/runnerdeployment_controller_test.go index 0ba29828b3..1013572a98 100644 --- a/controllers/actions.summerwind.net/runnerdeployment_controller_test.go +++ b/controllers/actions.summerwind.net/runnerdeployment_controller_test.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/runnerreplicaset_controller.go b/controllers/actions.summerwind.net/runnerreplicaset_controller.go index 089ca850ce..f86d80fb00 100644 --- a/controllers/actions.summerwind.net/runnerreplicaset_controller.go +++ b/controllers/actions.summerwind.net/runnerreplicaset_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go b/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go index 415b12586b..13a66343f1 100644 --- a/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go +++ b/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/runnerset_controller.go b/controllers/actions.summerwind.net/runnerset_controller.go index 497aafec1a..f937237ff8 100644 --- a/controllers/actions.summerwind.net/runnerset_controller.go +++ b/controllers/actions.summerwind.net/runnerset_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/schedule.go b/controllers/actions.summerwind.net/schedule.go index bc03c9f528..89152e0824 100644 --- a/controllers/actions.summerwind.net/schedule.go +++ b/controllers/actions.summerwind.net/schedule.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "fmt" diff --git a/controllers/actions.summerwind.net/schedule_test.go b/controllers/actions.summerwind.net/schedule_test.go index 79b03b8558..5077419fb2 100644 --- a/controllers/actions.summerwind.net/schedule_test.go +++ b/controllers/actions.summerwind.net/schedule_test.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "testing" diff --git a/controllers/actions.summerwind.net/suite_test.go b/controllers/actions.summerwind.net/suite_test.go index 6918bd91ff..24aa973f6a 100644 --- a/controllers/actions.summerwind.net/suite_test.go +++ b/controllers/actions.summerwind.net/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific 
language governing permissions and limitations under the License. */ -package controllers +package actionssummerwindnet import ( "os" diff --git a/controllers/actions.summerwind.net/sync_volumes.go b/controllers/actions.summerwind.net/sync_volumes.go index e95c2c18cb..a8cbae0f06 100644 --- a/controllers/actions.summerwind.net/sync_volumes.go +++ b/controllers/actions.summerwind.net/sync_volumes.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/testresourcereader.go b/controllers/actions.summerwind.net/testresourcereader.go index 9b12032067..30112473d6 100644 --- a/controllers/actions.summerwind.net/testresourcereader.go +++ b/controllers/actions.summerwind.net/testresourcereader.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/testresourcereader_test.go b/controllers/actions.summerwind.net/testresourcereader_test.go index 7b78928bc4..3d5946c25a 100644 --- a/controllers/actions.summerwind.net/testresourcereader_test.go +++ b/controllers/actions.summerwind.net/testresourcereader_test.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet import ( "context" diff --git a/controllers/actions.summerwind.net/utils.go b/controllers/actions.summerwind.net/utils.go index 36781daf2b..39a28d6104 100644 --- a/controllers/actions.summerwind.net/utils.go +++ b/controllers/actions.summerwind.net/utils.go @@ -1,4 +1,4 @@ -package controllers +package actionssummerwindnet func filterLabels(labels map[string]string, filter string) map[string]string { filtered := map[string]string{} diff --git a/controllers/actions.summerwind.net/utils_test.go b/controllers/actions.summerwind.net/utils_test.go index 0e54b4fc26..53bbcd0814 100644 --- a/controllers/actions.summerwind.net/utils_test.go +++ b/controllers/actions.summerwind.net/utils_test.go @@ -1,4 +1,4 @@ -package controllers +package 
actionssummerwindnet import ( "reflect" diff --git a/go.mod b/go.mod index 06d0a8d72e..0cd443ad7e 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,6 @@ require ( github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/emicklei/go-restful v2.9.5+incompatible // indirect github.com/emicklei/go-restful/v3 v3.8.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect diff --git a/go.sum b/go.sum index 892742dc6a..0891d26087 100644 --- a/go.sum +++ b/go.sum @@ -17,7 +17,6 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= @@ -35,7 +34,6 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub 
v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -46,18 +44,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ 
-71,27 +59,15 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= 
-github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 h1:5+NghM1Zred9Z078QEZtm28G/kfDfZN/92gkDlLwGVA= github.com/bradleyfalzon/ghinstallation/v2 v2.1.0/go.mod h1:Xg3xPRN5Mcq6GDqeUVhFbjEWMb4JHCyWEeeBGEYQoTU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -104,31 +80,11 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= 
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.8.0 
h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -144,18 +100,10 @@ github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -169,19 +117,15 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= @@ -189,17 +133,12 @@ github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5F 
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -235,9 +174,6 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree 
v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -252,14 +188,11 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI= github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28= -github.com/google/go-github/v47 v47.0.0 h1:eQap5bIRZibukP0VhngWgpuM0zhY4xntqOzn6DhdkE4= -github.com/google/go-github/v47 v47.0.0/go.mod h1:DRjdvizXE876j0YOZwInB1ESpOcU/xFBClNiQLSdorE= github.com/google/go-github/v47 v47.1.0 h1:Cacm/WxQBOa9lF0FT0EMjZ2BWMetQ1TQfyurn4yF1z8= github.com/google/go-github/v47 v47.1.0/go.mod h1:VPZBXNbFSJGjyjFRUKo9vZGawTajnWzC/YjGw/oFKi0= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -291,45 +224,17 @@ github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod 
h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -340,18 +245,14 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt 
v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -359,28 +260,13 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -388,126 +274,65 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack 
v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/ginkgo/v2 v2.5.0 h1:TRtrvv2vdQqzkwrQ1ke6vtXf7IK34RBUJafIy1wMwls= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= -github.com/onsi/gomega v1.19.0/go.mod 
h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= -github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.20.2 h1:8uQq0zMgLEfa0vRrrBgaJF2gyW9Da9BmfGV+OyUzfkY= -github.com/onsi/gomega v1.20.2/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= -github.com/onsi/gomega v1.21.1 h1:OB/euWYIExnPBohllTicTHmGTrMaqJ67nIu80j0/uEM= -github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= -github.com/onsi/gomega v1.22.0 h1:AIg2/OntwkBiCg5Tt1ayyiF1ArFrWFoCSMtMi/wdApk= -github.com/onsi/gomega v1.22.0/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= -github.com/onsi/gomega v1.22.1 h1:pY8O4lBfsHKZHM/6nrxkhVPUznOlIu3quZcKP/M20KI= -github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= -github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod 
h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.13.1 h1:3gMjIY2+/hzmqhtUC/aQNYldJA6DtH3CgQvwS+02K1c= -github.com/prometheus/client_golang v1.13.1/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 
h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod 
h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -520,34 +345,16 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/teambition/rrule-go v1.8.0 h1:a/IX5s56hGkFF+nRlJUooZU/45OTeeldBGL29nDKIHw= github.com/teambition/rrule-go v1.8.0/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= 
-go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -555,51 +362,23 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= 
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= -go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= -go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto 
v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -637,14 +416,10 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod 
v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -675,7 +450,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -683,21 +457,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= 
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -715,26 +476,7 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26 h1:uBgVQYJLi/m8M0wzp+aGwBWt90gMRoOVf+aWTW10QHI= -golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 h1:VnGaRqoLmqZH/3TMLJwYCEWkR4j1nuIU1U9TvbqsDUw= -golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 h1:2o1E+E8TpNLklK9nHiPiK1uzIYrIHt+cQx3ynCwq9V8= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 
v0.0.0-20221006150949-b44042a4b9c1 h1:3VPzK7eqH25j7GYw5w6g/GzNRc0/fYtrxz27z1gD4W0= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= -golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= -golang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU= -golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs= golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -748,12 +490,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -767,7 +506,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -783,13 +521,11 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -802,7 +538,6 @@ golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -813,30 +548,14 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -847,41 +566,30 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time 
v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -901,7 +609,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -922,7 +629,6 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools 
v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -986,7 +692,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -998,7 +703,6 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1023,7 +727,6 @@ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1063,8 +766,6 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -1077,13 +778,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1098,8 +794,6 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1107,97 +801,31 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/api v0.24.3 h1:tt55QEmKd6L2k5DP6G/ZzdMQKvG5ro4H4teClqm0sTY= -k8s.io/api v0.24.3/go.mod h1:elGR/XSZrS7z7cSZPzVWaycpJuGIw57j9b95/1PdJNI= -k8s.io/api v0.25.0 h1:H+Q4ma2U/ww0iGB78ijZx6DRByPz6/733jIuFpX70e0= -k8s.io/api v0.25.0/go.mod h1:ttceV1GyV1i1rnmvzT3BST08N6nGt+dudGrquzVQWPk= -k8s.io/api v0.25.1 h1:yL7du50yc93k17nH/Xe9jujAYrcDkI/i5DL1jPz4E3M= -k8s.io/api v0.25.1/go.mod h1:hh4itDvrWSJsmeUc28rIFNri8MatNAAxJjKcQmhX6TU= -k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8= -k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0= -k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= -k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI= -k8s.io/api v0.25.4 h1:3YO8J4RtmG7elEgaWMb4HgmpS2CfY1QlaOz9nwB+ZSs= -k8s.io/api v0.25.4/go.mod h1:IG2+RzyPQLllQxnhzD8KQNEu4c4YvyDTpSMztf4A0OQ= k8s.io/api v0.25.5 h1:mqyHf7aoaYMpdvO87mqpol+Qnsmo+y09S0PMIXwiZKo= k8s.io/api v0.25.5/go.mod h1:RzplZX0Z8rV/WhSTfEvnyd91bBhBQTRWo85qBQwRmb8= -k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= -k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= k8s.io/apiextensions-apiserver v0.25.0 h1:CJ9zlyXAbq0FIW8CD7HHyozCMBpDSiH7EdrSTCZcZFY= k8s.io/apiextensions-apiserver v0.25.0/go.mod 
h1:3pAjZiN4zw7R8aZC5gR0y3/vCkGlAjCazcg1me8iB/E= -k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.24.3 h1:hrFiNSA2cBZqllakVYyH/VyEh4B581bQRmqATJSeQTg= -k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.25.0 h1:MlP0r6+3XbkUG2itd6vp3oxbtdQLQI94fD5gCS+gnoU= -k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= -k8s.io/apimachinery v0.25.1 h1:t0XrnmCEHVgJlR2arwO8Awp9ylluDic706WePaYCBTI= -k8s.io/apimachinery v0.25.1/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= -k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs= -k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= -k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= -k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= -k8s.io/apimachinery v0.25.4 h1:CtXsuaitMESSu339tfhVXhQrPET+EiWnIY1rcurKnAc= -k8s.io/apimachinery v0.25.4/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= k8s.io/apimachinery v0.25.5 h1:SQomYHvv+aO43qdu3QKRf9YuI0oI8w3RrOQ1qPbAUGY= k8s.io/apimachinery v0.25.5/go.mod h1:1S2i1QHkmxc8+EZCIxe/fX5hpldVXk4gvnJInMEb8D4= -k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= -k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/client-go v0.24.3 h1:Nl1840+6p4JqkFWEW2LnMKU667BUxw03REfLAVhuKQY= -k8s.io/client-go v0.24.3/go.mod h1:AAovolf5Z9bY1wIg2FZ8LPQlEdKHjLI7ZD4rw920BJw= -k8s.io/client-go v0.25.0 h1:CVWIaCETLMBNiTUta3d5nzRbXvY5Hy9Dpl+VvREpu5E= -k8s.io/client-go v0.25.0/go.mod h1:lxykvypVfKilxhTklov0wz1FoaUZ8X4EwbhS6rpRfN8= -k8s.io/client-go v0.25.1 h1:uFj4AJKtE1/ckcSKz8IhgAuZTdRXZDKev8g387ndD58= -k8s.io/client-go v0.25.1/go.mod h1:rdFWTLV/uj2C74zGbQzOsmXPUtMAjSf7ajil4iJUNKo= -k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= -k8s.io/client-go v0.25.2/go.mod 
h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= -k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= -k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA= -k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8= -k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= k8s.io/client-go v0.25.5 h1:7QWVK0Ph4bLn0UwotPTc2FTgm8shreQXyvXnnHDd8rE= k8s.io/client-go v0.25.5/go.mod h1:bOeoaUUdpyz3WDFGo+Xm3nOQFh2KuYXRDwrvbAPtFQA= -k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= -k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= -k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= k8s.io/component-base v0.25.0 h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y= k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yAFxk= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod 
h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= -sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= -sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRMc2WIQ= -sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg= sigs.k8s.io/controller-runtime v0.13.1/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= 
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= From 8cceb3835636fbb63da527fe20fd25d0c0585908 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Sat, 14 Jan 2023 19:35:56 -0500 Subject: [PATCH 012/561] Include actions-runner-controller in runner's User-Agent for better telemetry in Actions service. (#2155) --- .../new_runner_pod_test.go | 24 +++++++++++++++++++ .../runner_controller.go | 5 ++++ 2 files changed, 29 insertions(+) diff --git a/controllers/actions.summerwind.net/new_runner_pod_test.go b/controllers/actions.summerwind.net/new_runner_pod_test.go index 5529ca42e7..4d3b419d74 100644 --- a/controllers/actions.summerwind.net/new_runner_pod_test.go +++ b/controllers/actions.summerwind.net/new_runner_pod_test.go @@ -131,6 +131,10 @@ func TestNewRunnerPod(t *testing.T) { Name: "RUNNER_STATUS_UPDATE_HOOK", Value: "false", }, + { + Name: "GITHUB_ACTIONS_RUNNER_EXTRA_USER_AGENT", + Value: "actions-runner-controller/NA", + }, { Name: "DOCKER_HOST", Value: "tcp://localhost:2376", @@ -274,6 +278,10 @@ func TestNewRunnerPod(t *testing.T) { Name: "RUNNER_STATUS_UPDATE_HOOK", Value: "false", }, + { + Name: "GITHUB_ACTIONS_RUNNER_EXTRA_USER_AGENT", + Value: "actions-runner-controller/NA", + }, }, VolumeMounts: []corev1.VolumeMount{ { @@ -356,6 +364,10 @@ func TestNewRunnerPod(t *testing.T) { Name: "RUNNER_STATUS_UPDATE_HOOK", Value: "false", }, + { + Name: "GITHUB_ACTIONS_RUNNER_EXTRA_USER_AGENT", + Value: 
"actions-runner-controller/NA", + }, }, VolumeMounts: []corev1.VolumeMount{ { @@ -649,6 +661,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { Name: "RUNNER_STATUS_UPDATE_HOOK", Value: "false", }, + { + Name: "GITHUB_ACTIONS_RUNNER_EXTRA_USER_AGENT", + Value: "actions-runner-controller/NA", + }, { Name: "DOCKER_HOST", Value: "tcp://localhost:2376", @@ -807,6 +823,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { Name: "RUNNER_STATUS_UPDATE_HOOK", Value: "false", }, + { + Name: "GITHUB_ACTIONS_RUNNER_EXTRA_USER_AGENT", + Value: "actions-runner-controller/NA", + }, { Name: "RUNNER_NAME", Value: "runner", @@ -908,6 +928,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { Name: "RUNNER_STATUS_UPDATE_HOOK", Value: "false", }, + { + Name: "GITHUB_ACTIONS_RUNNER_EXTRA_USER_AGENT", + Value: "actions-runner-controller/NA", + }, { Name: "RUNNER_NAME", Value: "runner", diff --git a/controllers/actions.summerwind.net/runner_controller.go b/controllers/actions.summerwind.net/runner_controller.go index 6c0749780d..c208d1a225 100644 --- a/controllers/actions.summerwind.net/runner_controller.go +++ b/controllers/actions.summerwind.net/runner_controller.go @@ -25,6 +25,7 @@ import ( "strings" "time" + "github.com/actions/actions-runner-controller/build" "github.com/actions/actions-runner-controller/hash" "github.com/go-logr/logr" @@ -835,6 +836,10 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru Name: "RUNNER_STATUS_UPDATE_HOOK", Value: fmt.Sprintf("%v", useRunnerStatusUpdateHook), }, + { + Name: "GITHUB_ACTIONS_RUNNER_EXTRA_USER_AGENT", + Value: fmt.Sprintf("actions-runner-controller/%s", build.Version), + }, } var seLinuxOptions *corev1.SELinuxOptions From 49872861f349b30dcaba7a0f704e3ebce9c0b0c5 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Sun, 15 Jan 2023 19:56:13 +0900 Subject: [PATCH 013/561] Add release note for ARC 0.27.0 (#2068) --- docs/releasenotes/0.27.md | 132 
++++++++++++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 docs/releasenotes/0.27.md diff --git a/docs/releasenotes/0.27.md b/docs/releasenotes/0.27.md new file mode 100644 index 0000000000..ded0970bb2 --- /dev/null +++ b/docs/releasenotes/0.27.md @@ -0,0 +1,132 @@ +# actions-runner-controller v0.27.0 + +All planned changes in this release can be found in the milestone https://github.com/actions-runner-controller/actions-runner-controller/milestone/10. + +Also see https://github.com/actions-runner-controller/actions-runner-controller/compare/v0.26.0...v0.27.0 for full changelog. + +This log documents breaking changes and major enhancements + +## Upgrading + +In case you're using our Helm chart to deploy ARC, use the chart 0.21.0 or greater. Don't miss upgrading CRDs as usual! Helm doesn't upgrade CRDs. + +## BREAKING CHANGE : `workflow_job` became ARC's only supported webhook event as the scale trigger. + +In this release, we've removed support for legacy `check_run`, `push`, and `pull_request` webhook events, in favor of `workflow_job` that has been released a year ago. Since then, it served all the use-cases formely and partially supported by the legacy events, and we should be ready to fully migrate to `workflow_job`. + +Anyone who's still using legacy webook events should see `HorizontalRunnerAutoscaler` specs that look similar to the following examples: + +```yaml +kind: HorizontalRunnerAutoscaler +spec: + scaleUpTriggers: + - githubEvent: + push: {} +``` + +```yaml +kind: HorizontalRunnerAutoscaler +spec: + scaleUpTriggers: + - githubEvent: + checkRun: {} +``` + +```yaml +kind: HorizontalRunnerAutoscaler +spec: + scaleUpTriggers: + - githubEvent: + pullRequest: {} +``` + +You need to update the spec to look like the below, along with enabling the `Workflow Job` events(and disabling unneeded `Push`, `Check Run`, and `Pull Request` evenst) on your webhook setting page on GitHub. 
+ +```yaml +kind: HorizontalRunnerAutoscaler +spec: + scaleUpTriggers: + - githubEvent: + workflowJob: {} +``` + +Relevant PR(s): #2001 + +## Fix : Runner pods should work more reliably with cluster-autoscaler + +We've fixed many edge-cases in the runner pod termination process which seem to have resulted in various issues, like pods stuck in Terminating, workflow jobs being stuck for 10 minutes or so when an external controller like cluster-autoscaler tried to terminate the runner pod that is still running a workflow job, a workflow job fails due to a job container step being unable access the docker daemon, and so on. + +Do note that you need to set appropariate `RUNNER_GRACEFUL_STOP_TIMEOUT` for both the `docker` sidecar container and the `runner` container specs to let it wait for long and sufficient time for your use-case. + +`RUNNER_GRACEFUL_STOP_TIMEOUT` is basically the longest time the runner stop process to wait until the runner agent to gracefully stop. + +It's set to `RUNNER_GRACEFUL_STOP_TIMEOUT=15` by default, which might be too short for any use-cases. + +For example, in case you're using AWS Spot Instances to power nodes for runner pods, it gives you 2 minutes at the longest. You'd want to set the graceful stop timeout slightly shorter than the 2 minutes, like `110` or `100` seconds depending how much cpu, memory and storage your runner pod is provided. + +With rich cpu/memory/storage/network resources, the runner agent could stop gracefully well within 10 seconds, making `110` the right setting. With fewer resources, the runner agent could take more than 10 seconds to stop gracefully. If you think it would take 20 seconds for your environment, `100` would be the right setting. 
+ +`RUNNER_GRACEFUL_STOP_TIMEOUT` is designed to be used to let the runner stop process as long as possible to avoid cancelling the workflow job in the middle of processing, yet avoiding the workflow job to stuck for 10 minutes due to the node disappear before the runner agent cancelling the job. + +Under the hood, `RUNNER_GRACEFUL_STOP_TIMEOUT` works by instructing [runner's signal handler](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/runner/graceful-stop.sh#L7) to delay forwarding `SIGTERM` sent by Kubernetes on pod terminatino down to the runner agent. The runner agent is supposed to cancel the workflow job only on `SIGTERM` so making this delay longer allows you to delay cancelling the workfow job, which results in a more graceful period to stop the runner. Practically, the runner pod stops gracefully only when the workflow job running within the runner pod has completed before the runner graceful stop timeout elapses. The timeout can't be forever in practice, although it might theoretically possible depending on your cluster environment. AWS Spot Instances, again for example, gives you 2 minutes to gracefully stop the whole node, and therefore `RUNNER_GRACEFUL_STOP_TIMEOUT` can't be longer than that. + +If you have success stories with the new `RUNNER_GRACEFUL_STOP_TIMEOUT`, please don't hesitate to create a `Show and Tell` discussion in our GitHub Discussions to share what configuration worked on which environment, including the name of your cloud provider, the name of managed Kubernetes service, the graceful stop timeout for nodes(defined and provided by the provider or the service) and the runner pods (`RUNNER_GRACEFUL_STOP_TIMEOUT`). 
+ +Relevant PR(s): #1759, #1851, #1855 + +## ENHANCEMENT : More reliable and customizable "wait-for-docker" feature + +You can now add a `WAIT_FOR_DOCKER_SECONDS` envvar to the `runner` container of the runner pod spec to customize how long you want the runner startup script to wait until the docker daemon gets up and running. Previously this has been hard-coded to 120 seconds and it wasn't sufficient in some environments. + +Along with the enhancement, we also fixed a bug in the runner startup script that it didn't exit immediately on the docker startup timeout. +The bug resulted in that you see a job container step failing due to missing docker socket. Ideally it should have kept auto-restarting the whole runner pod until you get a fully working runner pod with the working runner agent plus the docker daemon (that started within the timeout), and therefore you should have never seen the job step failing due to docker issue. +We fixed it so it should work as intended now. + +Relvant PR(s): #1999 + +## ENHANCEMENT : New webhook and metrics server for monitoring workflow jobs + +**This feature is 99% authored and contributed by @ColinHeathman. Big kudos to Colin for his awesome work! ** + +You can now use the new `actions-metrics-server` to expose additional GitHub webhook endpoint for receiving `workflow_job` events and calculating and collecting various metrics related to the jobs. Please see the updated chart documentation for how to enable it. + +We made it a separate component instead of adding the new metrics collector to our existing `github-webhook-server` to retain the ability to scale the `github-webhook-server` to two or more replicas for availability and scalability. + +Also note that `actions-metrics-server` cannot be scaled to 2 or more replicas today. 
+That's because it needs to store it's state somewhere to retain the `workflow_job` webhook event until it receives the corresponding webhook event to finally calculate the metric value, and the only supported state store is in-memory as of today. + +For exmaple, it needs to save `workflow_job` of `status=queued` until it receives the corresponding `workflow_job` of `status=in_progress` to finally calculate the queue duration metric value. + +We may add another state store that is backed by e.g. Memcached or Redis if there's enough demand. But we opted to not complicate ARC for now. You can follow the relevant discussion in [this thread](https://github.com/actions-runner-controller/actions-runner-controller/pull/1814#discussion_r974758924). + +Relvant PR(s): #1814, #2057 + +## New runner images based on Ubuntu 22.04 + +We started publishing new runner images based on Ubuntu 22.04 with the following tags: + +``` +summerwind/actions-runner-dind-rootless:v2.299.1-ubuntu-22.04 +summerwind/actions-runner-dind-rootless:v2.299.1-ubuntu-22.04-$COMMIT_ID +summerwind/actions-runner-dind-rootless:ubuntu-22.04-latest +ghcr.io/actions-runner-controller/actions-runner-controller/actions-runner-dind-rootless:v2.299.1-ubuntu-22.04 +ghcr.io/actions-runner-controller/actions-runner-controller/actions-runner-dind-rootless:v2.299.1-ubuntu-22.04-$COMMIT_ID +ghcr.io/actions-runner-controller/actions-runner-controller/actions-runner-dind-rootless:ubuntu-22.04-latest +``` + +The `latest` tags for the runner images will stick with Ubuntu 20.04 for a while. We'll try to submit an issue or a discussion for notice before switching the latest to 22.04. See [this thread](https://github.com/actions/actions-runner-controller/pull/2036#discussion_r1032856803) for more context. + +Note that we took this chance to slim down the runner images for more security, maintainability, and extensibility. 
That said, some packages that are present by default in hosted runners but can easily be installed using `setup-` actions (like `python` using the `setup-python` action) and other convenient but not strictly necessary packages like `ftp`, `telnet`, `upx` and so on are no longer installed onto our 22.04 based runners. Consult below Dockerfile parts and add some `setup-` actions to your workflows or build your own custom runner image(s) based on our new 22.04 images, in case you relied on some packages present in our 20.04 images but not in our 22.04 images: + +- [20.04 runner](https://github.com/actions/actions-runner-controller/blob/master/runner/actions-runner.ubuntu-20.04.dockerfile#L17-L51) +- [22.04 runner](https://github.com/actions/actions-runner-controller/blob/master/runner/actions-runner.ubuntu-22.04.dockerfile#L15-L28) + +- [20.04 dind-runner](https://github.com/actions/actions-runner-controller/blob/master/runner/actions-runner-dind.ubuntu-20.04.dockerfile#L17-L51) +- [22.04 dind-runner](https://github.com/actions/actions-runner-controller/blob/master/runner/actions-runner-dind.ubuntu-22.04.dockerfile#L15-L30) + +- [20.04 rootless-dind-runner](https://github.com/actions/actions-runner-controller/blob/master/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile#L19-L54) +- [22.04 rootless-dind-runner](https://github.com/actions/actions-runner-controller/blob/master/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile#L18-L33) + +These images are not strictly tied to the v0.27.0 release. You can freely try the new images with ARC v0.26.0, or use both 20.04 and 22.04 based images with ARC v0.27.0. 
+ +Relevant PR(s): #1924, #2030, #2033, #2036, #2050, #2078, #2079, #2080, #2098 From 7e961b96cc21df815a8522330cc0d9490cd74fa8 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 16 Jan 2023 09:40:40 +0100 Subject: [PATCH 014/561] Add resolve push to registries step (#2157) --- .github/workflows/publish-arc.yaml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish-arc.yaml b/.github/workflows/publish-arc.yaml index 5c60d5b645..7034c62a70 100644 --- a/.github/workflows/publish-arc.yaml +++ b/.github/workflows/publish-arc.yaml @@ -71,13 +71,22 @@ jobs: echo "RELEASE_TAG_NAME=${{ github.event.inputs.release_tag_name }}" >> $GITHUB_ENV fi + - name: Resolve push to registries + run: | + # Define the push to registries based on the event type + if [[ "${{ github.event_name }}" == "release" ]]; then + echo "PUSH_TO_REGISTRIES=true" >> $GITHUB_ENV + elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + echo "PUSH_TO_REGISTRIES=${{ inputs.push_to_registries }}" >> $GITHUB_ENV + fi + - name: Trigger Build And Push Images To Registries run: | # Authenticate gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }} # Trigger the workflow run - jq -n '{"event_type": "arc", "client_payload": {"release_tag_name": "${{ env.RELEASE_TAG_NAME }}", "push_to_registries": ${{ inputs.push_to_registries }}}}' \ + jq -n '{"event_type": "arc", "client_payload": {"release_tag_name": "${{ env.RELEASE_TAG_NAME }}", "push_to_registries": "${{ env.PUSH_TO_REGISTRIES }}" }}' \ | gh api -X POST /repos/actions-runner-controller/releases/dispatches --input - - name: Job summary @@ -86,7 +95,7 @@ jobs: echo "" >> $GITHUB_STEP_SUMMARY echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY echo "- Release tag: ${{ env.RELEASE_TAG_NAME }}" >> $GITHUB_STEP_SUMMARY - echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY + echo "- Push to registries: 
${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Status:**" >> $GITHUB_STEP_SUMMARY echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml)" >> $GITHUB_STEP_SUMMARY From 7106b72e10fea7d8faa255f5416e809feb2c0582 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 16 Jan 2023 10:04:41 +0100 Subject: [PATCH 015/561] Fix the workflow by adding the version resolve step (#2159) --- .github/workflows/publish-arc.yaml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/publish-arc.yaml b/.github/workflows/publish-arc.yaml index 7034c62a70..4c3a8075cd 100644 --- a/.github/workflows/publish-arc.yaml +++ b/.github/workflows/publish-arc.yaml @@ -48,6 +48,15 @@ jobs: tar zxvf ghr_v0.13.0_linux_amd64.tar.gz sudo mv ghr_v0.13.0_linux_amd64/ghr /usr/local/bin + - name: Set version env variable + run: | + # Define the release tag name based on the event type + if [[ "${{ github.event_name }}" == "release" ]]; then + echo "VERSION=$(cat ${GITHUB_EVENT_PATH} | jq -r '.release.tag_name')" >> $GITHUB_ENV + elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + echo "VERSION=${{ inputs.release_tag_name }}" >> $GITHUB_ENV + fi + - name: Upload artifacts env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -62,15 +71,6 @@ jobs: application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} organization: ${{ env.TARGET_ORG }} - - name: Set release tag name - run: | - # Define the release tag name based on the event type - if [[ "${{ github.event_name }}" == "release" ]]; then - echo "RELEASE_TAG_NAME=$(cat ${GITHUB_EVENT_PATH} | jq -r '.release.tag_name')" >> $GITHUB_ENV - elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then - echo "RELEASE_TAG_NAME=${{ github.event.inputs.release_tag_name }}" >> 
$GITHUB_ENV - fi - - name: Resolve push to registries run: | # Define the push to registries based on the event type @@ -86,7 +86,7 @@ jobs: gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }} # Trigger the workflow run - jq -n '{"event_type": "arc", "client_payload": {"release_tag_name": "${{ env.RELEASE_TAG_NAME }}", "push_to_registries": "${{ env.PUSH_TO_REGISTRIES }}" }}' \ + jq -n '{"event_type": "arc", "client_payload": {"release_tag_name": "${{ env.VERSION }}", "push_to_registries": "${{ env.PUSH_TO_REGISTRIES }}" }}' \ | gh api -X POST /repos/actions-runner-controller/releases/dispatches --input - - name: Job summary @@ -94,7 +94,7 @@ jobs: echo "The [publish-arc](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-arc.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY - echo "- Release tag: ${{ env.RELEASE_TAG_NAME }}" >> $GITHUB_STEP_SUMMARY + echo "- Release tag: ${{ env.VERSION }}" >> $GITHUB_STEP_SUMMARY echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Status:**" >> $GITHUB_STEP_SUMMARY From 7311778e2065ba40ef90180d37e29506446f9628 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Mon, 16 Jan 2023 18:24:24 +0900 Subject: [PATCH 016/561] chart: Bump chart and app versions for ARC 0.27.0 (#2160) --- charts/actions-runner-controller/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/actions-runner-controller/Chart.yaml b/charts/actions-runner-controller/Chart.yaml index 09dfe7f044..4d8ff15d0b 100644 --- a/charts/actions-runner-controller/Chart.yaml +++ b/charts/actions-runner-controller/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.21.1 +version: 0.22.0 # Used as the default manager tag value when no tag property is provided in the values.yaml -appVersion: 0.26.0 +appVersion: 0.27.0 home: https://github.com/actions/actions-runner-controller From 28f008d3fe31a250f6e0ab9f966e714c44d0587a Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 16 Jan 2023 10:31:43 +0100 Subject: [PATCH 017/561] fix: Update target branch from main to master (#2161) --- .github/workflows/publish-chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish-chart.yaml b/.github/workflows/publish-chart.yaml index b295394374..915734ba43 100644 --- a/.github/workflows/publish-chart.yaml +++ b/.github/workflows/publish-chart.yaml @@ -123,7 +123,7 @@ jobs: env: CHART_TARGET_ORG: actions-runner-controller CHART_TARGET_REPO: actions-runner-controller.github.io - CHART_TARGET_BRANCH: main + CHART_TARGET_BRANCH: master steps: - name: Checkout From f91916e424571a57861f24e825bc1e10e7e41281 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 17 Jan 2023 10:26:53 -0500 Subject: [PATCH 018/561] Ignore the new helm charts path for now. 
(#2165) --- .github/workflows/publish-chart.yaml | 2 ++ .github/workflows/validate-chart.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.github/workflows/publish-chart.yaml b/.github/workflows/publish-chart.yaml index 915734ba43..69646136d1 100644 --- a/.github/workflows/publish-chart.yaml +++ b/.github/workflows/publish-chart.yaml @@ -10,6 +10,8 @@ on: - 'charts/**' - '.github/workflows/publish-chart.yaml' - '!charts/actions-runner-controller/docs/**' + - '!charts/actions-runner-controller-2/**' + - '!charts/auto-scaling-runner-set/**' - '!**.md' workflow_dispatch: diff --git a/.github/workflows/validate-chart.yaml b/.github/workflows/validate-chart.yaml index 99fd267dfe..a0fc2b4d89 100644 --- a/.github/workflows/validate-chart.yaml +++ b/.github/workflows/validate-chart.yaml @@ -6,6 +6,8 @@ on: - 'charts/**' - '.github/workflows/validate-chart.yaml' - '!charts/actions-runner-controller/docs/**' + - '!charts/actions-runner-controller-2/**' + - '!charts/auto-scaling-runner-set/**' - '!**.md' workflow_dispatch: env: From 3b17a94a4291e1f07115abbd46ce1732bdd94224 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 17 Jan 2023 12:06:20 -0500 Subject: [PATCH 019/561] Introduce new preview auto-scaling mode for ARC. 
(#2153) Co-authored-by: Cory Miller Co-authored-by: Nikola Jokic Co-authored-by: Ava Stancu Co-authored-by: Ferenc Hammerl Co-authored-by: Francesco Renzi Co-authored-by: Bassem Dghaidi --- Dockerfile | 2 + Makefile | 5 + PROJECT | 12 + .../v1alpha1/autoscalinglistener_types.go | 89 + .../v1alpha1/autoscalingrunnerset_types.go | 143 + .../v1alpha1/ephemeralrunner_types.go | 130 + .../v1alpha1/ephemeralrunnerset_types.go | 61 + .../v1alpha1/groupversion_info.go | 36 + .../v1alpha1/zz_generated.deepcopy.go | 488 ++ apis/actions.github.com/v1beta1/.keep | 0 .../autoScalerKubernetesManager.go | 129 + .../autoScalerMessageListener.go | 184 + .../autoScalerMessageListener_test.go | 701 +++ .../autoScalerService.go | 185 + .../autoScalerService_test.go | 631 +++ .../kubernetesManager.go | 12 + cmd/githubrunnerscalesetlistener/main.go | 151 + cmd/githubrunnerscalesetlistener/main_test.go | 92 + .../messageListener.go | 13 + .../mock_KubernetesManager.go | 57 + .../mock_RunnerScaleSetClient.go | 59 + .../sessionrefreshingclient.go | 123 + .../sessionrefreshingclient_test.go | 421 ++ ...tions.github.com_autoscalinglisteners.yaml | 97 + ...ions.github.com_autoscalingrunnersets.yaml | 4218 ++++++++++++++++ .../actions.github.com_ephemeralrunners.yaml | 4249 +++++++++++++++++ ...ctions.github.com_ephemeralrunnersets.yaml | 4206 ++++++++++++++++ config/crd/kustomization.yaml | 4 + config/manager/manager.yaml | 8 + .../rbac/autoscalinglistener_editor_role.yaml | 24 + .../rbac/autoscalinglistener_viewer_role.yaml | 20 + .../autoscalingrunnerset_editor_role.yaml | 24 + .../autoscalingrunnerset_viewer_role.yaml | 20 + config/rbac/ephemeralrunner_editor_role.yaml | 24 + config/rbac/ephemeralrunner_viewer_role.yaml | 20 + .../rbac/ephemeralrunnerset_editor_role.yaml | 24 + .../rbac/ephemeralrunnerset_viewer_role.yaml | 20 + config/rbac/role.yaml | 139 + controllers/actions.github.com/.keep | 0 .../autoscalinglistener_controller.go | 450 ++ .../autoscalinglistener_controller_test.go | 
393 ++ .../autoscalingrunnerset_controller.go | 506 ++ .../autoscalingrunnerset_controller_test.go | 367 ++ controllers/actions.github.com/clientutil.go | 22 + controllers/actions.github.com/constants.go | 10 + .../ephemeralrunner_controller.go | 645 +++ .../ephemeralrunner_controller_test.go | 769 +++ .../ephemeralrunnerset_controller.go | 463 ++ .../ephemeralrunnerset_controller_test.go | 445 ++ .../actions.github.com/resourcebuilder.go | 437 ++ controllers/actions.github.com/suite_test.go | 91 + controllers/actions.github.com/utils.go | 27 + controllers/actions.github.com/utils_test.go | 34 + github/actions/client.go | 1101 +++++ github/actions/client_generate_jit_test.go | 75 + github/actions/client_job_acquisition_test.go | 144 + .../client_runner_scale_set_message_test.go | 269 ++ .../client_runner_scale_set_session_test.go | 244 + .../actions/client_runner_scale_set_test.go | 858 ++++ github/actions/client_runner_test.go | 219 + github/actions/errors.go | 71 + github/actions/fake/client.go | 235 + github/actions/fake/multi_client.go | 43 + github/actions/mock_ActionsService.go | 348 ++ github/actions/mock_SessionService.go | 103 + github/actions/multi_client.go | 164 + github/actions/multi_client_test.go | 163 + github/actions/sessionservice.go | 14 + github/actions/types.go | 153 + go.mod | 63 +- go.sum | 283 +- hash/fnv.go | 7 + hash/hash.go | 26 + logging/logger.go | 2 + main.go | 341 +- 75 files changed, 26073 insertions(+), 333 deletions(-) create mode 100644 apis/actions.github.com/v1alpha1/autoscalinglistener_types.go create mode 100644 apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go create mode 100644 apis/actions.github.com/v1alpha1/ephemeralrunner_types.go create mode 100644 apis/actions.github.com/v1alpha1/ephemeralrunnerset_types.go create mode 100644 apis/actions.github.com/v1alpha1/groupversion_info.go create mode 100644 apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go delete mode 100644 
apis/actions.github.com/v1beta1/.keep create mode 100644 cmd/githubrunnerscalesetlistener/autoScalerKubernetesManager.go create mode 100644 cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go create mode 100644 cmd/githubrunnerscalesetlistener/autoScalerMessageListener_test.go create mode 100644 cmd/githubrunnerscalesetlistener/autoScalerService.go create mode 100644 cmd/githubrunnerscalesetlistener/autoScalerService_test.go create mode 100644 cmd/githubrunnerscalesetlistener/kubernetesManager.go create mode 100644 cmd/githubrunnerscalesetlistener/main.go create mode 100644 cmd/githubrunnerscalesetlistener/main_test.go create mode 100644 cmd/githubrunnerscalesetlistener/messageListener.go create mode 100644 cmd/githubrunnerscalesetlistener/mock_KubernetesManager.go create mode 100644 cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go create mode 100644 cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go create mode 100644 cmd/githubrunnerscalesetlistener/sessionrefreshingclient_test.go create mode 100644 config/crd/bases/actions.github.com_autoscalinglisteners.yaml create mode 100644 config/crd/bases/actions.github.com_autoscalingrunnersets.yaml create mode 100644 config/crd/bases/actions.github.com_ephemeralrunners.yaml create mode 100644 config/crd/bases/actions.github.com_ephemeralrunnersets.yaml create mode 100644 config/rbac/autoscalinglistener_editor_role.yaml create mode 100644 config/rbac/autoscalinglistener_viewer_role.yaml create mode 100644 config/rbac/autoscalingrunnerset_editor_role.yaml create mode 100644 config/rbac/autoscalingrunnerset_viewer_role.yaml create mode 100644 config/rbac/ephemeralrunner_editor_role.yaml create mode 100644 config/rbac/ephemeralrunner_viewer_role.yaml create mode 100644 config/rbac/ephemeralrunnerset_editor_role.yaml create mode 100644 config/rbac/ephemeralrunnerset_viewer_role.yaml delete mode 100644 controllers/actions.github.com/.keep create mode 100644 
controllers/actions.github.com/autoscalinglistener_controller.go create mode 100644 controllers/actions.github.com/autoscalinglistener_controller_test.go create mode 100644 controllers/actions.github.com/autoscalingrunnerset_controller.go create mode 100644 controllers/actions.github.com/autoscalingrunnerset_controller_test.go create mode 100644 controllers/actions.github.com/clientutil.go create mode 100644 controllers/actions.github.com/constants.go create mode 100644 controllers/actions.github.com/ephemeralrunner_controller.go create mode 100644 controllers/actions.github.com/ephemeralrunner_controller_test.go create mode 100644 controllers/actions.github.com/ephemeralrunnerset_controller.go create mode 100644 controllers/actions.github.com/ephemeralrunnerset_controller_test.go create mode 100644 controllers/actions.github.com/resourcebuilder.go create mode 100644 controllers/actions.github.com/suite_test.go create mode 100644 controllers/actions.github.com/utils.go create mode 100644 controllers/actions.github.com/utils_test.go create mode 100644 github/actions/client.go create mode 100644 github/actions/client_generate_jit_test.go create mode 100644 github/actions/client_job_acquisition_test.go create mode 100644 github/actions/client_runner_scale_set_message_test.go create mode 100644 github/actions/client_runner_scale_set_session_test.go create mode 100644 github/actions/client_runner_scale_set_test.go create mode 100644 github/actions/client_runner_test.go create mode 100644 github/actions/errors.go create mode 100644 github/actions/fake/client.go create mode 100644 github/actions/fake/multi_client.go create mode 100644 github/actions/mock_ActionsService.go create mode 100644 github/actions/mock_SessionService.go create mode 100644 github/actions/multi_client.go create mode 100644 github/actions/multi_client_test.go create mode 100644 github/actions/sessionservice.go create mode 100644 github/actions/types.go diff --git a/Dockerfile b/Dockerfile index 
d199020205..51a60255fd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -37,6 +37,7 @@ RUN --mount=target=. \ --mount=type=cache,mode=0777,target=${GOCACHE} \ export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \ go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}'" -o /out/manager main.go && \ + go build -trimpath -ldflags="-s -w" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \ go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \ go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver @@ -49,6 +50,7 @@ WORKDIR / COPY --from=builder /out/manager . COPY --from=builder /out/github-webhook-server . COPY --from=builder /out/actions-metrics-server . +COPY --from=builder /out/github-runnerscaleset-listener . USER 65532:65532 diff --git a/Makefile b/Makefile index 4c4389f4fa..e29d42f045 100644 --- a/Makefile +++ b/Makefile @@ -86,6 +86,7 @@ test-with-deps: kube-apiserver etcd kubectl # Build manager binary manager: generate fmt vet go build -o bin/manager main.go + go build -o bin/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener # Run against the configured Kubernetes cluster in ~/.kube/config run: generate fmt vet manifests @@ -115,6 +116,10 @@ manifests-gen-crds: controller-gen yq chart-crds: cp config/crd/bases/*.yaml charts/actions-runner-controller/crds/ + rm charts/actions-runner-controller/crds/actions.github.com_autoscalingrunnersets.yaml + rm charts/actions-runner-controller/crds/actions.github.com_autoscalinglisteners.yaml + rm charts/actions-runner-controller/crds/actions.github.com_ephemeralrunnersets.yaml + rm charts/actions-runner-controller/crds/actions.github.com_ephemeralrunners.yaml # Run go fmt against code fmt: diff --git a/PROJECT b/PROJECT index 76dfd0be41..4e36bcb8b7 100644 --- a/PROJECT +++ b/PROJECT @@ -10,4 +10,16 @@ resources: - group: actions 
kind: RunnerDeployment version: v1alpha1 +- group: actions + kind: AutoscalingRunnerSet + version: v1alpha1 +- group: actions + kind: EphemeralRunnerSet + version: v1alpha1 +- group: actions + kind: EphemeralRunner + version: v1alpha1 +- group: actions + kind: AutoscalingListener + version: v1alpha1 version: "2" diff --git a/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go b/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go new file mode 100644 index 0000000000..68eb7664a3 --- /dev/null +++ b/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AutoscalingListenerSpec defines the desired state of AutoscalingListener +type AutoscalingListenerSpec struct { + // Required + GitHubConfigUrl string `json:"githubConfigUrl,omitempty"` + + // Required + GitHubConfigSecret string `json:"githubConfigSecret,omitempty"` + + // Required + RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"` + + // Required + AutoscalingRunnerSetNamespace string `json:"autoscalingRunnerSetNamespace,omitempty"` + + // Required + AutoscalingRunnerSetName string `json:"autoscalingRunnerSetName,omitempty"` + + // Required + EphemeralRunnerSetName string `json:"ephemeralRunnerSetName,omitempty"` + + // Required + // +kubebuilder:validation:Minimum:=0 + MaxRunners int `json:"maxRunners,omitempty"` + + // Required + // +kubebuilder:validation:Minimum:=0 + MinRunners int `json:"minRunners,omitempty"` + + // Required + Image string `json:"image,omitempty"` + + // Required + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` +} + +// AutoscalingListenerStatus defines the observed state of AutoscalingListener +type AutoscalingListenerStatus struct { +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:JSONPath=".spec.githubConfigUrl",name=GitHub Configure URL,type=string +//+kubebuilder:printcolumn:JSONPath=".spec.autoscalingRunnerSetNamespace",name=AutoscalingRunnerSet Namespace,type=string +//+kubebuilder:printcolumn:JSONPath=".spec.autoscalingRunnerSetName",name=AutoscalingRunnerSet Name,type=string + +// AutoscalingListener is the Schema for the autoscalinglisteners API +type AutoscalingListener struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutoscalingListenerSpec `json:"spec,omitempty"` + Status AutoscalingListenerStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + 
+// AutoscalingListenerList contains a list of AutoscalingListener +type AutoscalingListenerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutoscalingListener `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutoscalingListener{}, &AutoscalingListenerList{}) +} diff --git a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go new file mode 100644 index 0000000000..a842ff83f1 --- /dev/null +++ b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go @@ -0,0 +1,143 @@ +/* +Copyright 2020 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/actions/actions-runner-controller/hash" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=number +//+kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=number +//+kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=number +//+kubebuilder:printcolumn:JSONPath=".status.state",name=State,type=string + +// AutoscalingRunnerSet is the Schema for the autoscalingrunnersets API +type AutoscalingRunnerSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutoscalingRunnerSetSpec `json:"spec,omitempty"` + Status AutoscalingRunnerSetStatus `json:"status,omitempty"` +} + +// AutoscalingRunnerSetSpec defines the desired state of AutoscalingRunnerSet +type AutoscalingRunnerSetSpec struct { + // Required + GitHubConfigUrl string `json:"githubConfigUrl,omitempty"` + + // Required + GitHubConfigSecret string `json:"githubConfigSecret,omitempty"` + + // +optional + RunnerGroup string `json:"runnerGroup,omitempty"` + + // +optional + Proxy *ProxyConfig `json:"proxy,omitempty"` + + // +optional + GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"` + + // Required + Template corev1.PodTemplateSpec `json:"template,omitempty"` + + // +optional + // +kubebuilder:validation:Minimum:=0 + MaxRunners *int `json:"maxRunners,omitempty"` + + // +optional + // +kubebuilder:validation:Minimum:=0 + MinRunners *int `json:"minRunners,omitempty"` +} + +type GitHubServerTLSConfig struct { + // Required + RootCAsConfigMapRef string `json:"certConfigMapRef,omitempty"` +} + +type ProxyConfig struct { + // +optional + HTTP *ProxyServerConfig `json:"http,omitempty"` + + // +optional + HTTPS *ProxyServerConfig `json:"https,omitempty"` +} + +type ProxyServerConfig struct { + // Required + Url string `json:"url,omitempty"` + + // +optional + CredentialSecretRef string `json:"credentialSecretRef,omitempty"` + + // 
+optional + NoProxy []string `json:"noProxy,omitempty"` +} + +// AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet +type AutoscalingRunnerSetStatus struct { + // +optional + CurrentRunners int `json:"currentRunners,omitempty"` + + // +optional + State string `json:"state,omitempty"` +} + +func (ars *AutoscalingRunnerSet) ListenerSpecHash() string { + type listenerSpec = AutoscalingRunnerSetSpec + arsSpec := ars.Spec.DeepCopy() + spec := arsSpec + return hash.ComputeTemplateHash(&spec) +} + +func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string { + type runnerSetSpec struct { + GitHubConfigUrl string + GitHubConfigSecret string + RunnerGroup string + Proxy *ProxyConfig + GitHubServerTLS *GitHubServerTLSConfig + Template corev1.PodTemplateSpec + } + spec := &runnerSetSpec{ + GitHubConfigUrl: ars.Spec.GitHubConfigUrl, + GitHubConfigSecret: ars.Spec.GitHubConfigSecret, + RunnerGroup: ars.Spec.RunnerGroup, + Proxy: ars.Spec.Proxy, + GitHubServerTLS: ars.Spec.GitHubServerTLS, + Template: ars.Spec.Template, + } + return hash.ComputeTemplateHash(&spec) +} + +//+kubebuilder:object:root=true + +// AutoscalingRunnerSetList contains a list of AutoscalingRunnerSet +type AutoscalingRunnerSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutoscalingRunnerSet `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutoscalingRunnerSet{}, &AutoscalingRunnerSetList{}) +} diff --git a/apis/actions.github.com/v1alpha1/ephemeralrunner_types.go b/apis/actions.github.com/v1alpha1/ephemeralrunner_types.go new file mode 100644 index 0000000000..dbfe040e0c --- /dev/null +++ b/apis/actions.github.com/v1alpha1/ephemeralrunner_types.go @@ -0,0 +1,130 @@ +/* +Copyright 2020 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.githubConfigUrl",name="GitHub Config URL",type=string +// +kubebuilder:printcolumn:JSONPath=".status.runnerId",name=RunnerId,type=number +// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string +// +kubebuilder:printcolumn:JSONPath=".status.jobRepositoryName",name=JobRepository,type=string +// +kubebuilder:printcolumn:JSONPath=".status.jobWorkflowRef",name=JobWorkflowRef,type=string +// +kubebuilder:printcolumn:JSONPath=".status.workflowRunId",name=WorkflowRunId,type=number +// +kubebuilder:printcolumn:JSONPath=".status.jobDisplayName",name=JobDisplayName,type=string +// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// EphemeralRunner is the Schema for the ephemeralrunners API +type EphemeralRunner struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec EphemeralRunnerSpec `json:"spec,omitempty"` + Status EphemeralRunnerStatus `json:"status,omitempty"` +} + +// EphemeralRunnerSpec defines the desired state of EphemeralRunner +type EphemeralRunnerSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // +required + GitHubConfigUrl string 
`json:"githubConfigUrl,omitempty"` + + // +required + GitHubConfigSecret string `json:"githubConfigSecret,omitempty"` + + // +required + RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"` + + // +optional + Proxy *ProxyConfig `json:"proxy,omitempty"` + + // +optional + GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"` + + // +required + corev1.PodTemplateSpec `json:",inline"` +} + +// EphemeralRunnerStatus defines the observed state of EphemeralRunner +type EphemeralRunnerStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Turns true only if the runner is online. + // +optional + Ready bool `json:"ready"` + // Phase describes phases where EphemeralRunner can be in. + // The underlying type is a PodPhase, but the meaning is more restrictive + // + // The PodFailed phase should be set only when EphemeralRunner fails to start + // after multiple retries. That signals that this EphemeralRunner won't work, + // and manual inspection is required + // + // The PodSucceded phase should be set only when confirmed that EphemeralRunner + // actually executed the job and has been removed from the service. 
+ // +optional + Phase corev1.PodPhase `json:"phase,omitempty"` + // +optional + Reason string `json:"reason,omitempty"` + // +optional + Message string `json:"message,omitempty"` + + // +optional + RunnerId int `json:"runnerId,omitempty"` + // +optional + RunnerName string `json:"runnerName,omitempty"` + // +optional + RunnerJITConfig string `json:"runnerJITConfig,omitempty"` + + // +optional + Failures map[string]bool `json:"failures,omitempty"` + + // +optional + JobRequestId int64 `json:"jobRequestId,omitempty"` + + // +optional + JobRepositoryName string `json:"jobRepositoryName,omitempty"` + + // +optional + JobWorkflowRef string `json:"jobWorkflowRef,omitempty"` + + // +optional + WorkflowRunId int64 `json:"workflowRunId,omitempty"` + + // +optional + JobDisplayName string `json:"jobDisplayName,omitempty"` +} + +//+kubebuilder:object:root=true + +// EphemeralRunnerList contains a list of EphemeralRunner +type EphemeralRunnerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EphemeralRunner `json:"items"` +} + +func init() { + SchemeBuilder.Register(&EphemeralRunner{}, &EphemeralRunnerList{}) +} diff --git a/apis/actions.github.com/v1alpha1/ephemeralrunnerset_types.go b/apis/actions.github.com/v1alpha1/ephemeralrunnerset_types.go new file mode 100644 index 0000000000..167296d640 --- /dev/null +++ b/apis/actions.github.com/v1alpha1/ephemeralrunnerset_types.go @@ -0,0 +1,61 @@ +/* +Copyright 2020 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EphemeralRunnerSetSpec defines the desired state of EphemeralRunnerSet +type EphemeralRunnerSetSpec struct { + // Replicas is the number of desired EphemeralRunner resources in the k8s namespace. + Replicas int `json:"replicas,omitempty"` + + EphemeralRunnerSpec EphemeralRunnerSpec `json:"ephemeralRunnerSpec,omitempty"` +} + +// EphemeralRunnerSetStatus defines the observed state of EphemeralRunnerSet +type EphemeralRunnerSetStatus struct { + // CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet. + CurrentReplicas int `json:"currentReplicas,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="DesiredReplicas",type="integer" +// +kubebuilder:printcolumn:JSONPath=".status.currentReplicas", name="CurrentReplicas",type="integer" +// EphemeralRunnerSet is the Schema for the ephemeralrunnersets API +type EphemeralRunnerSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec EphemeralRunnerSetSpec `json:"spec,omitempty"` + Status EphemeralRunnerSetStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// EphemeralRunnerSetList contains a list of EphemeralRunnerSet +type EphemeralRunnerSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EphemeralRunnerSet `json:"items"` +} + +func init() { + SchemeBuilder.Register(&EphemeralRunnerSet{}, &EphemeralRunnerSetList{}) +} diff --git a/apis/actions.github.com/v1alpha1/groupversion_info.go b/apis/actions.github.com/v1alpha1/groupversion_info.go new file mode 100644 index 0000000000..e4256706dc --- /dev/null +++ 
b/apis/actions.github.com/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2020 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API Schema definitions for the batch v1 API group +// +kubebuilder:object:generate=true +// +groupName=actions.github.com +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "actions.github.com", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go b/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..753dd7fb3a --- /dev/null +++ b/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,488 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingListener) DeepCopyInto(out *AutoscalingListener) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListener. +func (in *AutoscalingListener) DeepCopy() *AutoscalingListener { + if in == nil { + return nil + } + out := new(AutoscalingListener) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutoscalingListener) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingListenerList) DeepCopyInto(out *AutoscalingListenerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutoscalingListener, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerList. +func (in *AutoscalingListenerList) DeepCopy() *AutoscalingListenerList { + if in == nil { + return nil + } + out := new(AutoscalingListenerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutoscalingListenerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingListenerSpec) DeepCopyInto(out *AutoscalingListenerSpec) { + *out = *in + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]v1.LocalObjectReference, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerSpec. +func (in *AutoscalingListenerSpec) DeepCopy() *AutoscalingListenerSpec { + if in == nil { + return nil + } + out := new(AutoscalingListenerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingListenerStatus) DeepCopyInto(out *AutoscalingListenerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerStatus. 
+func (in *AutoscalingListenerStatus) DeepCopy() *AutoscalingListenerStatus { + if in == nil { + return nil + } + out := new(AutoscalingListenerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingRunnerSet) DeepCopyInto(out *AutoscalingRunnerSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSet. +func (in *AutoscalingRunnerSet) DeepCopy() *AutoscalingRunnerSet { + if in == nil { + return nil + } + out := new(AutoscalingRunnerSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutoscalingRunnerSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingRunnerSetList) DeepCopyInto(out *AutoscalingRunnerSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutoscalingRunnerSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSetList. +func (in *AutoscalingRunnerSetList) DeepCopy() *AutoscalingRunnerSetList { + if in == nil { + return nil + } + out := new(AutoscalingRunnerSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AutoscalingRunnerSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingRunnerSetSpec) DeepCopyInto(out *AutoscalingRunnerSetSpec) { + *out = *in + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(ProxyConfig) + (*in).DeepCopyInto(*out) + } + if in.GitHubServerTLS != nil { + in, out := &in.GitHubServerTLS, &out.GitHubServerTLS + *out = new(GitHubServerTLSConfig) + **out = **in + } + in.Template.DeepCopyInto(&out.Template) + if in.MaxRunners != nil { + in, out := &in.MaxRunners, &out.MaxRunners + *out = new(int) + **out = **in + } + if in.MinRunners != nil { + in, out := &in.MinRunners, &out.MinRunners + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSetSpec. +func (in *AutoscalingRunnerSetSpec) DeepCopy() *AutoscalingRunnerSetSpec { + if in == nil { + return nil + } + out := new(AutoscalingRunnerSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingRunnerSetStatus) DeepCopyInto(out *AutoscalingRunnerSetStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingRunnerSetStatus. +func (in *AutoscalingRunnerSetStatus) DeepCopy() *AutoscalingRunnerSetStatus { + if in == nil { + return nil + } + out := new(AutoscalingRunnerSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralRunner) DeepCopyInto(out *EphemeralRunner) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunner. +func (in *EphemeralRunner) DeepCopy() *EphemeralRunner { + if in == nil { + return nil + } + out := new(EphemeralRunner) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EphemeralRunner) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralRunnerList) DeepCopyInto(out *EphemeralRunnerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EphemeralRunner, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerList. +func (in *EphemeralRunnerList) DeepCopy() *EphemeralRunnerList { + if in == nil { + return nil + } + out := new(EphemeralRunnerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EphemeralRunnerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralRunnerSet) DeepCopyInto(out *EphemeralRunnerSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSet. +func (in *EphemeralRunnerSet) DeepCopy() *EphemeralRunnerSet { + if in == nil { + return nil + } + out := new(EphemeralRunnerSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EphemeralRunnerSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralRunnerSetList) DeepCopyInto(out *EphemeralRunnerSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EphemeralRunnerSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSetList. +func (in *EphemeralRunnerSetList) DeepCopy() *EphemeralRunnerSetList { + if in == nil { + return nil + } + out := new(EphemeralRunnerSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EphemeralRunnerSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralRunnerSetSpec) DeepCopyInto(out *EphemeralRunnerSetSpec) { + *out = *in + in.EphemeralRunnerSpec.DeepCopyInto(&out.EphemeralRunnerSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSetSpec. +func (in *EphemeralRunnerSetSpec) DeepCopy() *EphemeralRunnerSetSpec { + if in == nil { + return nil + } + out := new(EphemeralRunnerSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralRunnerSetStatus) DeepCopyInto(out *EphemeralRunnerSetStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSetStatus. +func (in *EphemeralRunnerSetStatus) DeepCopy() *EphemeralRunnerSetStatus { + if in == nil { + return nil + } + out := new(EphemeralRunnerSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralRunnerSpec) DeepCopyInto(out *EphemeralRunnerSpec) { + *out = *in + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(ProxyConfig) + (*in).DeepCopyInto(*out) + } + if in.GitHubServerTLS != nil { + in, out := &in.GitHubServerTLS, &out.GitHubServerTLS + *out = new(GitHubServerTLSConfig) + **out = **in + } + in.PodTemplateSpec.DeepCopyInto(&out.PodTemplateSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerSpec. +func (in *EphemeralRunnerSpec) DeepCopy() *EphemeralRunnerSpec { + if in == nil { + return nil + } + out := new(EphemeralRunnerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EphemeralRunnerStatus) DeepCopyInto(out *EphemeralRunnerStatus) { + *out = *in + if in.Failures != nil { + in, out := &in.Failures, &out.Failures + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralRunnerStatus. +func (in *EphemeralRunnerStatus) DeepCopy() *EphemeralRunnerStatus { + if in == nil { + return nil + } + out := new(EphemeralRunnerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubServerTLSConfig) DeepCopyInto(out *GitHubServerTLSConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubServerTLSConfig. +func (in *GitHubServerTLSConfig) DeepCopy() *GitHubServerTLSConfig { + if in == nil { + return nil + } + out := new(GitHubServerTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(ProxyServerConfig) + (*in).DeepCopyInto(*out) + } + if in.HTTPS != nil { + in, out := &in.HTTPS, &out.HTTPS + *out = new(ProxyServerConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProxyServerConfig) DeepCopyInto(out *ProxyServerConfig) { + *out = *in + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyServerConfig. +func (in *ProxyServerConfig) DeepCopy() *ProxyServerConfig { + if in == nil { + return nil + } + out := new(ProxyServerConfig) + in.DeepCopyInto(out) + return out +} diff --git a/apis/actions.github.com/v1beta1/.keep b/apis/actions.github.com/v1beta1/.keep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/cmd/githubrunnerscalesetlistener/autoScalerKubernetesManager.go b/cmd/githubrunnerscalesetlistener/autoScalerKubernetesManager.go new file mode 100644 index 0000000000..20d828ac93 --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/autoScalerKubernetesManager.go @@ -0,0 +1,129 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + jsonpatch "github.com/evanphx/json-patch" + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +type AutoScalerKubernetesManager struct { + *kubernetes.Clientset + + logger logr.Logger +} + +func NewKubernetesManager(logger *logr.Logger) (*AutoScalerKubernetesManager, error) { + conf, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + + kubeClient, err := kubernetes.NewForConfig(conf) + if err != nil { + return nil, err + } + + var manager = &AutoScalerKubernetesManager{ + Clientset: kubeClient, + logger: logger.WithName("KubernetesManager"), + } + return manager, nil +} + +func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error { + original := &v1alpha1.EphemeralRunnerSet{ + Spec: v1alpha1.EphemeralRunnerSetSpec{ + Replicas: -1, + }, + } + 
originalJson, err := json.Marshal(original) + if err != nil { + k.logger.Error(err, "could not marshal empty ephemeral runner set") + } + + patch := &v1alpha1.EphemeralRunnerSet{ + Spec: v1alpha1.EphemeralRunnerSetSpec{ + Replicas: runnerCount, + }, + } + patchJson, err := json.Marshal(patch) + if err != nil { + k.logger.Error(err, "could not marshal patch ephemeral runner set") + } + mergePatch, err := jsonpatch.CreateMergePatch(originalJson, patchJson) + if err != nil { + k.logger.Error(err, "could not create merge patch json for ephemeral runner set") + } + + k.logger.Info("Created merge patch json for EphemeralRunnerSet update", "json", string(mergePatch)) + + patchedEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{} + err = k.RESTClient(). + Patch(types.MergePatchType). + Prefix("apis", "actions.github.com", "v1alpha1"). + Namespace(namespace). + Resource("EphemeralRunnerSets"). + Name(resourceName). + Body([]byte(mergePatch)). + Do(ctx). + Into(patchedEphemeralRunnerSet) + if err != nil { + return fmt.Errorf("could not patch ephemeral runner set , patch JSON: %s, error: %w", string(mergePatch), err) + } + + k.logger.Info("Ephemeral runner set scaled.", "namespace", namespace, "name", resourceName, "replicas", patchedEphemeralRunnerSet.Spec.Replicas) + return nil +} + +func (k *AutoScalerKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, workflowRunId, jobRequestId int64) error { + original := &v1alpha1.EphemeralRunner{} + originalJson, err := json.Marshal(original) + if err != nil { + return fmt.Errorf("could not marshal empty ephemeral runner, error: %w", err) + } + + patch := &v1alpha1.EphemeralRunner{ + Status: v1alpha1.EphemeralRunnerStatus{ + JobRequestId: jobRequestId, + JobRepositoryName: fmt.Sprintf("%s/%s", ownerName, repositoryName), + WorkflowRunId: workflowRunId, + JobWorkflowRef: jobWorkflowRef, + JobDisplayName: jobDisplayName, + }, 
+ } + patchedJson, err := json.Marshal(patch) + if err != nil { + return fmt.Errorf("could not marshal patched ephemeral runner, error: %w", err) + } + + mergePatch, err := jsonpatch.CreateMergePatch(originalJson, patchedJson) + if err != nil { + k.logger.Error(err, "could not create merge patch json for ephemeral runner") + } + + k.logger.Info("Created merge patch json for EphemeralRunner status update", "json", string(mergePatch)) + + patchedStatus := &v1alpha1.EphemeralRunner{} + err = k.RESTClient(). + Patch(types.MergePatchType). + Prefix("apis", "actions.github.com", "v1alpha1"). + Namespace(namespace). + Resource("EphemeralRunners"). + Name(resourceName). + SubResource("status"). + Body(mergePatch). + Do(ctx). + Into(patchedStatus) + if err != nil { + return fmt.Errorf("could not patch ephemeral runner status, patch JSON: %s, error: %w", string(mergePatch), err) + } + + return nil +} diff --git a/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go b/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go new file mode 100644 index 0000000000..2c8b53014b --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go @@ -0,0 +1,184 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + "net/http" + "os" + "time" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/go-logr/logr" + "github.com/google/uuid" + "github.com/pkg/errors" +) + +const ( + sessionCreationMaxRetryCount = 10 +) + +type devContextKey bool + +var testIgnoreSleep devContextKey = true + +type AutoScalerClient struct { + client actions.SessionService + logger logr.Logger + + lastMessageId int64 + initialMessage *actions.RunnerScaleSetMessage +} + +func NewAutoScalerClient( + ctx context.Context, + client actions.ActionsService, + logger *logr.Logger, + runnerScaleSetId int, + options ...func(*AutoScalerClient), +) (*AutoScalerClient, error) { + listener := AutoScalerClient{ + logger: 
logger.WithName("auto_scaler"), + } + + session, initialMessage, err := createSession(ctx, &listener.logger, client, runnerScaleSetId) + if err != nil { + return nil, fmt.Errorf("fail to create session. %w", err) + } + + listener.lastMessageId = 0 + listener.initialMessage = initialMessage + listener.client = newSessionClient(client, logger, session) + + for _, option := range options { + option(&listener) + } + + return &listener, nil +} + +func createSession(ctx context.Context, logger *logr.Logger, client actions.ActionsService, runnerScaleSetId int) (*actions.RunnerScaleSetSession, *actions.RunnerScaleSetMessage, error) { + hostName, err := os.Hostname() + if err != nil { + hostName = uuid.New().String() + logger.Info("could not get hostname, fail back to a random string.", "fallback", hostName) + } + + var runnerScaleSetSession *actions.RunnerScaleSetSession + var retryCount int + for { + runnerScaleSetSession, err = client.CreateMessageSession(ctx, runnerScaleSetId, hostName) + if err == nil { + break + } + + clientSideError := &actions.HttpClientSideError{} + if errors.As(err, &clientSideError) && clientSideError.Code != http.StatusConflict { + logger.Info("unable to create message session. The error indicates something is wrong on the client side, won't make any retry.") + return nil, nil, fmt.Errorf("create message session http request failed. %w", err) + } + + retryCount++ + if retryCount >= sessionCreationMaxRetryCount { + return nil, nil, fmt.Errorf("create message session failed since it exceed %d retry limit. %w", sessionCreationMaxRetryCount, err) + } + + logger.Info("unable to create message session. 
Will try again in 30 seconds", "error", err.Error()) + if ok := ctx.Value(testIgnoreSleep); ok == nil { + time.Sleep(getRandomDuration(30, 45)) + } + } + + statistics, _ := json.Marshal(runnerScaleSetSession.Statistics) + logger.Info("current runner scale set statistics.", "statistics", string(statistics)) + + if runnerScaleSetSession.Statistics.TotalAvailableJobs > 0 || runnerScaleSetSession.Statistics.TotalAssignedJobs > 0 { + acquirableJobs, err := client.GetAcquirableJobs(ctx, runnerScaleSetId) + if err != nil { + return nil, nil, fmt.Errorf("get acquirable jobs failed. %w", err) + } + + acquirableJobsJson, err := json.Marshal(acquirableJobs.Jobs) + if err != nil { + return nil, nil, fmt.Errorf("marshal acquirable jobs failed. %w", err) + } + + initialMessage := &actions.RunnerScaleSetMessage{ + MessageId: 0, + MessageType: "RunnerScaleSetJobMessages", + Statistics: runnerScaleSetSession.Statistics, + Body: string(acquirableJobsJson), + } + + return runnerScaleSetSession, initialMessage, nil + } + + return runnerScaleSetSession, nil, nil +} + +func (m *AutoScalerClient) Close() error { + m.logger.Info("closing.") + return m.client.Close() +} + +func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error) error { + if m.initialMessage != nil { + err := handler(m.initialMessage) + if err != nil { + return fmt.Errorf("fail to process initial message. %w", err) + } + + m.initialMessage = nil + return nil + } + + for { + message, err := m.client.GetMessage(ctx, m.lastMessageId) + if err != nil { + return fmt.Errorf("get message failed from refreshing client. %w", err) + } + + if message == nil { + continue + } + + err = handler(message) + if err != nil { + return fmt.Errorf("handle message failed. 
%w", err) + } + + m.lastMessageId = message.MessageId + + return m.deleteMessage(ctx, message.MessageId) + } +} + +func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) error { + err := m.client.DeleteMessage(ctx, messageId) + if err != nil { + return fmt.Errorf("delete message failed from refreshing client. %w", err) + } + + m.logger.Info("deleted message.", "messageId", messageId) + return nil +} + +func (m *AutoScalerClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error { + m.logger.Info("acquiring jobs.", "request count", len(requestIds), "requestIds", fmt.Sprint(requestIds)) + if len(requestIds) == 0 { + return nil + } + + ids, err := m.client.AcquireJobs(ctx, requestIds) + if err != nil { + return fmt.Errorf("acquire jobs failed from refreshing client. %w", err) + } + + m.logger.Info("acquired jobs.", "requested", len(requestIds), "acquired", len(ids)) + return nil +} + +func getRandomDuration(minSeconds, maxSeconds int) time.Duration { + return time.Duration(rand.Intn(maxSeconds-minSeconds)+minSeconds) * time.Second +} diff --git a/cmd/githubrunnerscalesetlistener/autoScalerMessageListener_test.go b/cmd/githubrunnerscalesetlistener/autoScalerMessageListener_test.go new file mode 100644 index 0000000000..d0615515d2 --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/autoScalerMessageListener_test.go @@ -0,0 +1,701 @@ +package main + +import ( + "context" + "fmt" + "testing" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/actions/actions-runner-controller/logging" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestCreateSession(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx 
:= context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1) + + require.NoError(t, err, "Error creating autoscaler client") + assert.Equal(t, session, session, "Session is not correct") + assert.Nil(t, asClient.initialMessage, "Initial message should be nil") + assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestCreateSession_CreateInitMessage(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{ + TotalAvailableJobs: 1, + TotalAssignedJobs: 5, + }, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{ + Count: 1, + Jobs: []actions.AcquirableJob{ + { + RunnerRequestId: 1, + OwnerName: "owner", + RepositoryName: "repo", + AcquireJobUrl: "https://github.com", + }, + }, + }, nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1) + + 
require.NoError(t, err, "Error creating autoscaler client") + assert.Equal(t, session, session, "Session is not correct") + assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil") + assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0") + assert.Equal(t, int64(0), asClient.initialMessage.MessageId, "Initial message id should be 0") + assert.Equal(t, "RunnerScaleSetJobMessages", asClient.initialMessage.MessageType, "Initial message type should be RunnerScaleSetJobMessages") + assert.Equal(t, 5, asClient.initialMessage.Statistics.TotalAssignedJobs, "Initial message total assigned jobs should be 5") + assert.Equal(t, 1, asClient.initialMessage.Statistics.TotalAvailableJobs, "Initial message total available jobs should be 1") + assert.Equal(t, "[{\"acquireJobUrl\":\"https://github.com\",\"messageType\":\"\",\"runnerRequestId\":1,\"repositoryName\":\"repo\",\"ownerName\":\"owner\",\"jobWorkflowRef\":\"\",\"eventName\":\"\",\"requestLabels\":null}]", asClient.initialMessage.Body, "Initial message body is not correct") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestCreateSession_CreateInitMessageWithOnlyAssignedJobs(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{ + TotalAssignedJobs: 5, + }, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockActionsClient.On("GetAcquirableJobs", ctx, 
1).Return(&actions.AcquirableJobList{ + Count: 0, + Jobs: []actions.AcquirableJob{}, + }, nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1) + + require.NoError(t, err, "Error creating autoscaler client") + assert.Equal(t, session, session, "Session is not correct") + assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil") + assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0") + assert.Equal(t, int64(0), asClient.initialMessage.MessageId, "Initial message id should be 0") + assert.Equal(t, "RunnerScaleSetJobMessages", asClient.initialMessage.MessageType, "Initial message type should be RunnerScaleSetJobMessages") + assert.Equal(t, 5, asClient.initialMessage.Statistics.TotalAssignedJobs, "Initial message total assigned jobs should be 5") + assert.Equal(t, 0, asClient.initialMessage.Statistics.TotalAvailableJobs, "Initial message total available jobs should be 0") + assert.Equal(t, "[]", asClient.initialMessage.Body, "Initial message body is not correct") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestCreateSession_CreateInitMessageFailed(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{ + TotalAvailableJobs: 1, + TotalAssignedJobs: 5, + }, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(nil, fmt.Errorf("error")) 
+ + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1) + + assert.ErrorContains(t, err, "get acquirable jobs failed. error", "Unexpected error") + assert.Nil(t, asClient, "Client should be nil") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestCreateSession_RetrySessionConflict(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.WithValue(context.Background(), testIgnoreSleep, true) + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(nil, &actions.HttpClientSideError{ + Code: 409, + }).Once() + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil).Once() + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1) + + require.NoError(t, err, "Error creating autoscaler client") + assert.Equal(t, session, session, "Session is not correct") + assert.Nil(t, asClient.initialMessage, "Initial message should be nil") + assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestCreateSession_RetrySessionConflict_RunOutOfRetry(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := 
context.WithValue(context.Background(), testIgnoreSleep, true) + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(nil, &actions.HttpClientSideError{ + Code: 409, + }) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1) + + assert.Error(t, err, "Error should be returned") + assert.Nil(t, asClient, "AutoScaler should be nil") + assert.True(t, mockActionsClient.AssertNumberOfCalls(t, "CreateMessageSession", sessionCreationMaxRetryCount), "CreateMessageSession should be called 10 times") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestCreateSession_NotRetryOnGeneralException(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.WithValue(context.Background(), testIgnoreSleep, true) + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(nil, &actions.HttpClientSideError{ + Code: 403, + }) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1) + + assert.Error(t, err, "Error should be returned") + assert.Nil(t, asClient, "AutoScaler should be nil") + assert.True(t, mockActionsClient.AssertNumberOfCalls(t, "CreateMessageSession", 1), "CreateMessageSession should be called 1 time and not retry on generic error") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestDeleteSession(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + mockSessionClient := &actions.MockSessionService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + 
SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockSessionClient.On("Close").Return(nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { + asc.client = mockSessionClient + }) + require.NoError(t, err, "Error creating autoscaler client") + + err = asClient.Close() + assert.NoError(t, err, "Error deleting session") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met") +} + +func TestDeleteSession_Failed(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + mockSessionClient := &actions.MockSessionService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockSessionClient.On("Close").Return(fmt.Errorf("error")) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { + asc.client = mockSessionClient + }) + require.NoError(t, err, "Error creating autoscaler client") + + err = asClient.Close() + assert.Error(t, err, "Error should be returned") + assert.True(t, mockActionsClient.AssertExpectations(t), 
"All expectations should be met") + assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met") +} + +func TestGetRunnerScaleSetMessage(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + mockSessionClient := &actions.MockSessionService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "test", + Body: "test", + }, nil) + mockSessionClient.On("DeleteMessage", ctx, int64(1)).Return(nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { + asc.client = mockSessionClient + }) + require.NoError(t, err, "Error creating autoscaler client") + + err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { + logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) + return nil + }) + + assert.NoError(t, err, "Error getting message") + assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met") +} + +func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + 
mockSessionClient := &actions.MockSessionService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "test", + Body: "test", + }, nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { + asc.client = mockSessionClient + }) + require.NoError(t, err, "Error creating autoscaler client") + + err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { + logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) + return fmt.Errorf("error") + }) + + assert.ErrorContains(t, err, "handle message failed. 
error", "Error getting message") + assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met") +} + +func TestGetRunnerScaleSetMessage_HandleInitialMessage(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{ + TotalAvailableJobs: 1, + TotalAssignedJobs: 2, + }, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{ + Count: 1, + Jobs: []actions.AcquirableJob{ + { + RunnerRequestId: 1, + OwnerName: "owner", + RepositoryName: "repo", + AcquireJobUrl: "https://github.com", + }, + }, + }, nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1) + require.NoError(t, err, "Error creating autoscaler client") + require.NotNil(t, asClient.initialMessage, "Initial message should be set") + + err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { + logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) + return nil + }) + + assert.NoError(t, err, "Error getting message") + assert.Nil(t, asClient.initialMessage, "Initial message should be nil") + assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should 
be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestGetRunnerScaleSetMessage_HandleInitialMessageFailed(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{ + TotalAvailableJobs: 1, + TotalAssignedJobs: 2, + }, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{ + Count: 1, + Jobs: []actions.AcquirableJob{ + { + RunnerRequestId: 1, + OwnerName: "owner", + RepositoryName: "repo", + AcquireJobUrl: "https://github.com", + }, + }, + }, nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1) + require.NoError(t, err, "Error creating autoscaler client") + require.NotNil(t, asClient.initialMessage, "Initial message should be set") + + err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { + logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) + return fmt.Errorf("error") + }) + + assert.ErrorContains(t, err, "fail to process initial message. 
error", "Error getting message") + assert.NotNil(t, asClient.initialMessage, "Initial message should be nil") + assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + mockSessionClient := &actions.MockSessionService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockSessionClient.On("GetMessage", ctx, int64(0)).Return(nil, nil).Times(3) + mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "test", + Body: "test", + }, nil).Once() + mockSessionClient.On("DeleteMessage", ctx, int64(1)).Return(nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { + asc.client = mockSessionClient + }) + require.NoError(t, err, "Error creating autoscaler client") + + err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { + logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) + return nil + }) + + assert.NoError(t, err, "Error getting message") + assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated") + assert.True(t, 
mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + mockSessionClient := &actions.MockSessionService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockSessionClient.On("GetMessage", ctx, int64(0)).Return(nil, fmt.Errorf("error")) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { + asc.client = mockSessionClient + }) + require.NoError(t, err, "Error creating autoscaler client") + + err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { + return fmt.Errorf("Should not be called") + }) + + assert.ErrorContains(t, err, "get message failed from refreshing client. 
error", "Error should be returned") + assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met") +} + +func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + mockSessionClient := &actions.MockSessionService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "test", + Body: "test", + }, nil) + mockSessionClient.On("DeleteMessage", ctx, int64(1)).Return(fmt.Errorf("error")) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { + asc.client = mockSessionClient + }) + require.NoError(t, err, "Error creating autoscaler client") + + err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { + logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) + return nil + }) + + assert.ErrorContains(t, err, "delete message failed from refreshing client. 
error", "Error getting message") + assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestAcquireJobsForRunnerScaleSet(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + mockSessionClient := &actions.MockSessionService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockSessionClient.On("AcquireJobs", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return([]int64{1, 2, 3}, nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { + asc.client = mockSessionClient + }) + require.NoError(t, err, "Error creating autoscaler client") + + err = asClient.AcquireJobsForRunnerScaleSet(ctx, []int64{1, 2, 3}) + assert.NoError(t, err, "Error acquiring jobs") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met") +} + +func TestAcquireJobsForRunnerScaleSet_SkipEmptyList(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + mockSessionClient := &actions.MockSessionService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating 
logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { + asc.client = mockSessionClient + }) + require.NoError(t, err, "Error creating autoscaler client") + + err = asClient.AcquireJobsForRunnerScaleSet(ctx, []int64{}) + assert.NoError(t, err, "Error acquiring jobs") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met") +} + +func TestAcquireJobsForRunnerScaleSet_Failed(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + mockSessionClient := &actions.MockSessionService{} + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + Statistics: &actions.RunnerScaleSetStatistic{}, + } + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockSessionClient.On("AcquireJobs", ctx, mock.Anything).Return(nil, fmt.Errorf("error")) + + asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { + asc.client = mockSessionClient + }) + require.NoError(t, err, "Error creating 
autoscaler client") + + err = asClient.AcquireJobsForRunnerScaleSet(ctx, []int64{1, 2, 3}) + assert.ErrorContains(t, err, "acquire jobs failed from refreshing client. error", "Expect error acquiring jobs") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met") +} diff --git a/cmd/githubrunnerscalesetlistener/autoScalerService.go b/cmd/githubrunnerscalesetlistener/autoScalerService.go new file mode 100644 index 0000000000..dd5ccaa373 --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/autoScalerService.go @@ -0,0 +1,185 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "math" + "strings" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/go-logr/logr" +) + +type ScaleSettings struct { + Namespace string + ResourceName string + MinRunners int + MaxRunners int +} + +type Service struct { + ctx context.Context + logger logr.Logger + rsClient RunnerScaleSetClient + kubeManager KubernetesManager + settings *ScaleSettings + currentRunnerCount int +} + +func NewService( + ctx context.Context, + rsClient RunnerScaleSetClient, + manager KubernetesManager, + settings *ScaleSettings, + options ...func(*Service), +) *Service { + s := &Service{ + ctx: ctx, + rsClient: rsClient, + kubeManager: manager, + settings: settings, + currentRunnerCount: 0, + logger: logr.FromContextOrDiscard(ctx), + } + + for _, option := range options { + option(s) + } + + return s +} + +func (s *Service) Start() error { + if s.settings.MinRunners > 0 { + s.logger.Info("scale to match minimal runners.") + err := s.scaleForAssignedJobCount(0) + if err != nil { + return fmt.Errorf("could not scale to match minimal runners. 
%w", err) + } + } + + for { + s.logger.Info("waiting for message...") + select { + case <-s.ctx.Done(): + s.logger.Info("service is stopped.") + return nil + default: + err := s.rsClient.GetRunnerScaleSetMessage(s.ctx, s.processMessage) + if err != nil { + return fmt.Errorf("could not get and process message. %w", err) + } + } + } +} + +func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error { + s.logger.Info("process message.", "messageId", message.MessageId, "messageType", message.MessageType) + if message.Statistics == nil { + return fmt.Errorf("can't process message with empty statistics") + } + + s.logger.Info("current runner scale set statistics.", + "available jobs", message.Statistics.TotalAvailableJobs, + "acquired jobs", message.Statistics.TotalAcquiredJobs, + "assigned jobs", message.Statistics.TotalAssignedJobs, + "running jobs", message.Statistics.TotalRunningJobs, + "registered runners", message.Statistics.TotalRegisteredRunners, + "busy runners", message.Statistics.TotalBusyRunners, + "idle runners", message.Statistics.TotalIdleRunners) + + if message.MessageType != "RunnerScaleSetJobMessages" { + s.logger.Info("skip message with unknown message type.", "messageType", message.MessageType) + return nil + } + + var batchedMessages []json.RawMessage + if err := json.NewDecoder(strings.NewReader(message.Body)).Decode(&batchedMessages); err != nil { + return fmt.Errorf("could not decode job messages. %w", err) + } + + s.logger.Info("process batched runner scale set job messages.", "messageId", message.MessageId, "batchSize", len(batchedMessages)) + + var availableJobs []int64 + for _, message := range batchedMessages { + var messageType actions.JobMessageType + if err := json.Unmarshal(message, &messageType); err != nil { + return fmt.Errorf("could not decode job message type. 
%w", err) + } + + switch messageType.MessageType { + case "JobAvailable": + var jobAvailable actions.JobAvailable + if err := json.Unmarshal(message, &jobAvailable); err != nil { + return fmt.Errorf("could not decode job available message. %w", err) + } + s.logger.Info("job available message received.", "RequestId", jobAvailable.RunnerRequestId) + availableJobs = append(availableJobs, jobAvailable.RunnerRequestId) + case "JobAssigned": + var jobAssigned actions.JobAssigned + if err := json.Unmarshal(message, &jobAssigned); err != nil { + return fmt.Errorf("could not decode job assigned message. %w", err) + } + s.logger.Info("job assigned message received.", "RequestId", jobAssigned.RunnerRequestId) + case "JobStarted": + var jobStarted actions.JobStarted + if err := json.Unmarshal(message, &jobStarted); err != nil { + return fmt.Errorf("could not decode job started message. %w", err) + } + s.logger.Info("job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId) + s.updateJobInfoForRunner(jobStarted) + case "JobCompleted": + var jobCompleted actions.JobCompleted + if err := json.Unmarshal(message, &jobCompleted); err != nil { + return fmt.Errorf("could not decode job completed message. %w", err) + } + s.logger.Info("job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName) + default: + s.logger.Info("unknown job message type.", "messageType", messageType.MessageType) + } + } + + err := s.rsClient.AcquireJobsForRunnerScaleSet(s.ctx, availableJobs) + if err != nil { + return fmt.Errorf("could not acquire jobs. 
%w", err) + } + + return s.scaleForAssignedJobCount(message.Statistics.TotalAssignedJobs) +} + +func (s *Service) scaleForAssignedJobCount(count int) error { + targetRunnerCount := int(math.Max(math.Min(float64(s.settings.MaxRunners), float64(count)), float64(s.settings.MinRunners))) + if targetRunnerCount != s.currentRunnerCount { + s.logger.Info("try scale runner request up/down base on assigned job count", + "assigned job", count, + "decision", targetRunnerCount, + "min", s.settings.MinRunners, + "max", s.settings.MaxRunners, + "currentRunnerCount", s.currentRunnerCount) + err := s.kubeManager.ScaleEphemeralRunnerSet(s.ctx, s.settings.Namespace, s.settings.ResourceName, targetRunnerCount) + if err != nil { + return fmt.Errorf("could not scale ephemeral runner set (%s/%s). %w", s.settings.Namespace, s.settings.ResourceName, err) + } + + s.currentRunnerCount = targetRunnerCount + } + + return nil +} + +// updateJobInfoForRunner updates the ephemeral runner with the job info and this is best effort since the info is only for better telemetry +func (s *Service) updateJobInfoForRunner(jobInfo actions.JobStarted) { + s.logger.Info("update job info for runner", + "runnerName", jobInfo.RunnerName, + "ownerName", jobInfo.OwnerName, + "repoName", jobInfo.RepositoryName, + "workflowRef", jobInfo.JobWorkflowRef, + "workflowRunId", jobInfo.WorkflowRunId, + "jobDisplayName", jobInfo.JobDisplayName, + "requestId", jobInfo.RunnerRequestId) + err := s.kubeManager.UpdateEphemeralRunnerWithJobInfo(s.ctx, s.settings.Namespace, jobInfo.RunnerName, jobInfo.OwnerName, jobInfo.RepositoryName, jobInfo.JobWorkflowRef, jobInfo.JobDisplayName, jobInfo.WorkflowRunId, jobInfo.RunnerRequestId) + if err != nil { + s.logger.Error(err, "could not update ephemeral runner with job info", "runnerName", jobInfo.RunnerName, "requestId", jobInfo.RunnerRequestId) + } +} diff --git a/cmd/githubrunnerscalesetlistener/autoScalerService_test.go b/cmd/githubrunnerscalesetlistener/autoScalerService_test.go 
new file mode 100644 index 0000000000..6581859adb --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/autoScalerService_test.go @@ -0,0 +1,631 @@ +package main + +import ( + "context" + "fmt" + "testing" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/actions/actions-runner-controller/logging" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestNewService(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 0, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + + assert.Equal(t, logger, service.logger) +} + +func TestStart(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 0, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once() + + err := service.Start() + + assert.NoError(t, err, "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + 
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestStart_ScaleToMinRunners(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 5, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once() + + err := service.Start() + + assert.NoError(t, err, "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestStart_ScaleToMinRunnersFailed(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 5, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(fmt.Errorf("error")).Once() + + err := service.Start() + + assert.ErrorContains(t, err, "could not scale to match 
minimal runners", "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestStart_GetMultipleMessages(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 0, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(5) + mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once() + + err := service.Start() + + assert.NoError(t, err, "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestStart_ErrorOnMessage(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 0, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, 
mock.Anything).Return(nil).Times(2) + mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(fmt.Errorf("error")).Once() + + err := service.Start() + + assert.ErrorContains(t, err, "could not get and process message. error", "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestProcessMessage_NoStatistic(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 0, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + + err := service.processMessage(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "test", + Body: "test", + }) + + assert.ErrorContains(t, err, "can't process message with empty statistics", "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestProcessMessage_IgnoreUnknownMessageType(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: 
"namespace", + ResourceName: "resource", + MinRunners: 0, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + + err := service.processMessage(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "unknown", + Statistics: &actions.RunnerScaleSetStatistic{ + TotalAvailableJobs: 1, + }, + Body: "[]", + }) + + assert.NoError(t, err, "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestProcessMessage_InvalidBatchMessageJson(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 0, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + + err := service.processMessage(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "RunnerScaleSetJobMessages", + Statistics: &actions.RunnerScaleSetStatistic{ + TotalAvailableJobs: 1, + }, + Body: "invalid json", + }) + + assert.ErrorContains(t, err, "could not decode job messages", "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestProcessMessage_InvalidJobMessageJson(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + 
require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 0, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + + err := service.processMessage(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "RunnerScaleSetJobMessages", + Statistics: &actions.RunnerScaleSetStatistic{ + TotalAvailableJobs: 1, + }, + Body: "[\"something\", \"test\"]", + }) + + assert.ErrorContains(t, err, "could not decode job message type", "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestProcessMessage_MultipleMessages(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 1, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once() + mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once() + + err := service.processMessage(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "RunnerScaleSetJobMessages", + Statistics: 
&actions.RunnerScaleSetStatistic{ + TotalAssignedJobs: 2, + TotalAvailableJobs: 2, + }, + Body: "[{\"messageType\":\"JobAvailable\", \"runnerRequestId\": 3},{\"messageType\":\"JobAvailable\", \"runnerRequestId\": 4},{\"messageType\":\"JobAssigned\", \"runnerRequestId\": 2}, {\"messageType\":\"JobCompleted\", \"runnerRequestId\": 1, \"result\":\"succeed\"},{\"messageType\":\"unknown\"}]", + }) + + assert.NoError(t, err, "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestProcessMessage_AcquireJobsFailed(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 0, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 })).Return(fmt.Errorf("error")).Once() + + err := service.processMessage(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "RunnerScaleSetJobMessages", + Statistics: &actions.RunnerScaleSetStatistic{ + TotalAssignedJobs: 1, + TotalAvailableJobs: 1, + }, + Body: "[{\"messageType\":\"JobAvailable\", \"runnerRequestId\": 1}]", + }) + + assert.ErrorContains(t, err, "could not acquire jobs. 
error", "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestScaleForAssignedJobCount_DeDupScale(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 0, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once() + + err := service.scaleForAssignedJobCount(2) + require.NoError(t, err, "Unexpected error") + err = service.scaleForAssignedJobCount(2) + require.NoError(t, err, "Unexpected error") + err = service.scaleForAssignedJobCount(2) + require.NoError(t, err, "Unexpected error") + err = service.scaleForAssignedJobCount(2) + + assert.NoError(t, err, "Unexpected error") + assert.Equal(t, 2, service.currentRunnerCount, "Unexpected runner count") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) 
+ defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 1, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once() + mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(nil).Once() + mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once() + mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once() + mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once() + + err := service.scaleForAssignedJobCount(0) + require.NoError(t, err, "Unexpected error") + err = service.scaleForAssignedJobCount(3) + require.NoError(t, err, "Unexpected error") + err = service.scaleForAssignedJobCount(5) + require.NoError(t, err, "Unexpected error") + err = service.scaleForAssignedJobCount(1) + require.NoError(t, err, "Unexpected error") + err = service.scaleForAssignedJobCount(10) + + assert.NoError(t, err, "Unexpected error") + assert.Equal(t, 5, service.currentRunnerCount, "Unexpected runner count") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 1, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(fmt.Errorf("error")) + + err := service.scaleForAssignedJobCount(2) + + assert.ErrorContains(t, err, "could not scale ephemeral runner set (namespace/resource). error", "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestProcessMessage_JobStartedMessage(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 1, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + service.currentRunnerCount = 1 + + mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once() + mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once() + + err := service.processMessage(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "RunnerScaleSetJobMessages", + Statistics: 
&actions.RunnerScaleSetStatistic{ + TotalAssignedJobs: 1, + TotalAvailableJobs: 0, + }, + Body: "[{\"messageType\":\"JobStarted\", \"runnerRequestId\": 3, \"runnerId\": 1, \"runnerName\": \"runner1\", \"ownerName\": \"owner1\", \"repositoryName\": \"repo1\", \"jobWorkflowRef\": \".github/workflows/ci.yaml\", \"jobDisplayName\": \"job1\", \"workflowRunId\": 100 }]", + }) + + assert.NoError(t, err, "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} + +func TestProcessMessage_JobStartedMessageIgnoreRunnerUpdateError(t *testing.T) { + mockRsClient := &MockRunnerScaleSetClient{} + mockKubeManager := &MockKubernetesManager{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + service := NewService( + ctx, + mockRsClient, + mockKubeManager, + &ScaleSettings{ + Namespace: "namespace", + ResourceName: "resource", + MinRunners: 1, + MaxRunners: 5, + }, + func(s *Service) { + s.logger = logger + }, + ) + service.currentRunnerCount = 1 + + mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(fmt.Errorf("error")).Once() + mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once() + + err := service.processMessage(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "RunnerScaleSetJobMessages", + Statistics: &actions.RunnerScaleSetStatistic{ + TotalAssignedJobs: 0, + TotalAvailableJobs: 0, + }, + Body: "[{\"messageType\":\"JobStarted\", \"runnerRequestId\": 3, \"runnerId\": 
1, \"runnerName\": \"runner1\", \"ownerName\": \"owner1\", \"repositoryName\": \"repo1\", \"jobWorkflowRef\": \".github/workflows/ci.yaml\", \"jobDisplayName\": \"job1\", \"workflowRunId\": 100 }]", + }) + + assert.NoError(t, err, "Unexpected error") + assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met") + assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met") +} diff --git a/cmd/githubrunnerscalesetlistener/kubernetesManager.go b/cmd/githubrunnerscalesetlistener/kubernetesManager.go new file mode 100644 index 0000000000..f8e9058c9e --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/kubernetesManager.go @@ -0,0 +1,12 @@ +package main + +import ( + "context" +) + +//go:generate mockery --inpackage --name=KubernetesManager +type KubernetesManager interface { + ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error + + UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, jobRequestId, workflowRunId int64) error +} diff --git a/cmd/githubrunnerscalesetlistener/main.go b/cmd/githubrunnerscalesetlistener/main.go new file mode 100644 index 0000000000..583ae2682d --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/main.go @@ -0,0 +1,151 @@ +/* +Copyright 2021 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/actions/actions-runner-controller/logging" + "github.com/go-logr/logr" + "github.com/kelseyhightower/envconfig" +) + +type RunnerScaleSetListenerConfig struct { + ConfigureUrl string `split_words:"true"` + AppID int64 `split_words:"true"` + AppInstallationID int64 `split_words:"true"` + AppPrivateKey string `split_words:"true"` + Token string `split_words:"true"` + EphemeralRunnerSetNamespace string `split_words:"true"` + EphemeralRunnerSetName string `split_words:"true"` + MaxRunners int `split_words:"true"` + MinRunners int `split_words:"true"` + RunnerScaleSetId int `split_words:"true"` +} + +func main() { + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err) + os.Exit(1) + } + + var rc RunnerScaleSetListenerConfig + if err := envconfig.Process("github", &rc); err != nil { + logger.Error(err, "Error: processing environment variables for RunnerScaleSetListenerConfig") + os.Exit(1) + } + + // Validate all inputs + if err := validateConfig(&rc); err != nil { + logger.Error(err, "Inputs validation failed") + os.Exit(1) + } + + if err := run(rc, logger); err != nil { + logger.Error(err, "Run error") + os.Exit(1) + } +} + +func run(rc RunnerScaleSetListenerConfig, logger logr.Logger) error { + // Create root context and hook with sigint and sigterm + ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer stop() + + creds := &actions.ActionsAuth{} + if rc.Token != "" { + creds.Token = rc.Token + } else { + creds.AppCreds = &actions.GitHubAppAuth{ + AppID: rc.AppID, + AppInstallationID: rc.AppInstallationID, + AppPrivateKey: rc.AppPrivateKey, + } + } + + actionsServiceClient, err := actions.NewClient(ctx, rc.ConfigureUrl, creds, "actions-runner-controller", 
logger) + if err != nil { + return fmt.Errorf("failed to create an Actions Service client: %w", err) + } + + // Create message listener + autoScalerClient, err := NewAutoScalerClient(ctx, actionsServiceClient, &logger, rc.RunnerScaleSetId) + if err != nil { + return fmt.Errorf("failed to create a message listener: %w", err) + } + defer autoScalerClient.Close() + + // Create kube manager and scale controller + kubeManager, err := NewKubernetesManager(&logger) + if err != nil { + return fmt.Errorf("failed to create kubernetes manager: %w", err) + } + + scaleSettings := &ScaleSettings{ + Namespace: rc.EphemeralRunnerSetNamespace, + ResourceName: rc.EphemeralRunnerSetName, + MaxRunners: rc.MaxRunners, + MinRunners: rc.MinRunners, + } + + service := NewService(ctx, autoScalerClient, kubeManager, scaleSettings, func(s *Service) { + s.logger = logger.WithName("service") + }) + + // Start listening for messages + if err = service.Start(); err != nil { + return fmt.Errorf("failed to start message queue listener: %w", err) + } + return nil +} + +func validateConfig(config *RunnerScaleSetListenerConfig) error { + if len(config.ConfigureUrl) == 0 { + return fmt.Errorf("GitHubConfigUrl is not provided") + } + + if len(config.EphemeralRunnerSetNamespace) == 0 || len(config.EphemeralRunnerSetName) == 0 { + return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", config.EphemeralRunnerSetNamespace, config.EphemeralRunnerSetName) + } + + if config.RunnerScaleSetId == 0 { + return fmt.Errorf("RunnerScaleSetId '%d' is missing", config.RunnerScaleSetId) + } + + if config.MaxRunners < config.MinRunners { + return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", config.MinRunners, config.MaxRunners) + } + + hasToken := len(config.Token) > 0 + hasPrivateKeyConfig := config.AppID > 0 && config.AppPrivateKey != "" + + if !hasToken && !hasPrivateKeyConfig { + return fmt.Errorf("GitHub auth credential is missing, token length: '%d', 
appId: '%d', installationId: '%d', private key length: '%d'", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey)) + } + + if hasToken && hasPrivateKeyConfig { + return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d'", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey)) + } + + return nil +} diff --git a/cmd/githubrunnerscalesetlistener/main_test.go b/cmd/githubrunnerscalesetlistener/main_test.go new file mode 100644 index 0000000000..bd2d71879f --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/main_test.go @@ -0,0 +1,92 @@ +package main + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestConfigValidationMinMax(t *testing.T) { + config := &RunnerScaleSetListenerConfig{ + ConfigureUrl: "github.com/some_org/some_repo", + EphemeralRunnerSetNamespace: "namespace", + EphemeralRunnerSetName: "deployment", + RunnerScaleSetId: 1, + MinRunners: 5, + MaxRunners: 2, + Token: "token", + } + err := validateConfig(config) + assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners") +} + +func TestConfigValidationMissingToken(t *testing.T) { + config := &RunnerScaleSetListenerConfig{ + ConfigureUrl: "github.com/some_org/some_repo", + EphemeralRunnerSetNamespace: "namespace", + EphemeralRunnerSetName: "deployment", + RunnerScaleSetId: 1, + } + err := validateConfig(config) + expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey)) + assert.ErrorContains(t, err, expectedError, "Expected error about missing auth") +} + +func TestConfigValidationAppKey(t *testing.T) { + config := &RunnerScaleSetListenerConfig{ + AppID: 
1, + AppInstallationID: 10, + ConfigureUrl: "github.com/some_org/some_repo", + EphemeralRunnerSetNamespace: "namespace", + EphemeralRunnerSetName: "deployment", + RunnerScaleSetId: 1, + } + err := validateConfig(config) + expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey)) + assert.ErrorContains(t, err, expectedError, "Expected error about missing auth") +} + +func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) { + config := &RunnerScaleSetListenerConfig{ + AppID: 1, + AppInstallationID: 10, + AppPrivateKey: "asdf", + Token: "asdf", + ConfigureUrl: "github.com/some_org/some_repo", + EphemeralRunnerSetNamespace: "namespace", + EphemeralRunnerSetName: "deployment", + RunnerScaleSetId: 1, + } + err := validateConfig(config) + expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. 
Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey)) + assert.ErrorContains(t, err, expectedError, "Expected error about missing auth") +} + +func TestConfigValidation(t *testing.T) { + config := &RunnerScaleSetListenerConfig{ + ConfigureUrl: "https://github.com/actions", + EphemeralRunnerSetNamespace: "namespace", + EphemeralRunnerSetName: "deployment", + RunnerScaleSetId: 1, + MinRunners: 1, + MaxRunners: 5, + Token: "asdf", + } + + err := validateConfig(config) + + assert.NoError(t, err, "Expected no error") +} + +func TestConfigValidationConfigUrl(t *testing.T) { + config := &RunnerScaleSetListenerConfig{ + EphemeralRunnerSetNamespace: "namespace", + EphemeralRunnerSetName: "deployment", + RunnerScaleSetId: 1, + } + + err := validateConfig(config) + + assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl") +} diff --git a/cmd/githubrunnerscalesetlistener/messageListener.go b/cmd/githubrunnerscalesetlistener/messageListener.go new file mode 100644 index 0000000000..0f01db5839 --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/messageListener.go @@ -0,0 +1,13 @@ +package main + +import ( + "context" + + "github.com/actions/actions-runner-controller/github/actions" +) + +//go:generate mockery --inpackage --name=RunnerScaleSetClient +type RunnerScaleSetClient interface { + GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error) error + AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error +} diff --git a/cmd/githubrunnerscalesetlistener/mock_KubernetesManager.go b/cmd/githubrunnerscalesetlistener/mock_KubernetesManager.go new file mode 100644 index 0000000000..2e941f1096 --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/mock_KubernetesManager.go @@ -0,0 +1,57 @@ +// Code generated by mockery v2.16.0. 
DO NOT EDIT. + +package main + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// MockKubernetesManager is an autogenerated mock type for the KubernetesManager type +type MockKubernetesManager struct { + mock.Mock +} + +// ScaleEphemeralRunnerSet provides a mock function with given fields: ctx, namespace, resourceName, runnerCount +func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace string, resourceName string, runnerCount int) error { + ret := _m.Called(ctx, namespace, resourceName, runnerCount) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, int) error); ok { + r0 = rf(ctx, namespace, resourceName, runnerCount) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateEphemeralRunnerWithJobInfo provides a mock function with given fields: ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId +func (_m *MockKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace string, resourceName string, ownerName string, repositoryName string, jobWorkflowRef string, jobDisplayName string, jobRequestId int64, workflowRunId int64) error { + ret := _m.Called(ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string, string, int64, int64) error); ok { + r0 = rf(ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type mockConstructorTestingTNewMockKubernetesManager interface { + mock.TestingT + Cleanup(func()) +} + +// NewMockKubernetesManager creates a new instance of MockKubernetesManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewMockKubernetesManager(t mockConstructorTestingTNewMockKubernetesManager) *MockKubernetesManager { + mock := &MockKubernetesManager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go b/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go new file mode 100644 index 0000000000..4efae0a933 --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go @@ -0,0 +1,59 @@ +// Code generated by mockery v2.16.0. DO NOT EDIT. + +package main + +import ( + context "context" + + actions "github.com/actions/actions-runner-controller/github/actions" + + mock "github.com/stretchr/testify/mock" +) + +// MockRunnerScaleSetClient is an autogenerated mock type for the RunnerScaleSetClient type +type MockRunnerScaleSetClient struct { + mock.Mock +} + +// AcquireJobsForRunnerScaleSet provides a mock function with given fields: ctx, requestIds +func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error { + ret := _m.Called(ctx, requestIds) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []int64) error); ok { + r0 = rf(ctx, requestIds) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler +func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error) error { + ret := _m.Called(ctx, handler) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, func(*actions.RunnerScaleSetMessage) error) error); ok { + r0 = rf(ctx, handler) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type mockConstructorTestingTNewMockRunnerScaleSetClient interface { + mock.TestingT + Cleanup(func()) +} + +// NewMockRunnerScaleSetClient creates a new instance of MockRunnerScaleSetClient. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewMockRunnerScaleSetClient(t mockConstructorTestingTNewMockRunnerScaleSetClient) *MockRunnerScaleSetClient { + mock := &MockRunnerScaleSetClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go b/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go new file mode 100644 index 0000000000..11df7e210a --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go @@ -0,0 +1,123 @@ +package main + +import ( + "context" + "fmt" + "time" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/go-logr/logr" + "github.com/pkg/errors" +) + +type SessionRefreshingClient struct { + client actions.ActionsService + logger logr.Logger + session *actions.RunnerScaleSetSession +} + +func newSessionClient(client actions.ActionsService, logger *logr.Logger, session *actions.RunnerScaleSetSession) *SessionRefreshingClient { + return &SessionRefreshingClient{ + client: client, + session: session, + logger: logger.WithName("refreshing_client"), + } +} + +func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) { + message, err := m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId) + if err == nil { + return message, nil + } + + expiredError := &actions.MessageQueueTokenExpiredError{} + if !errors.As(err, &expiredError) { + return nil, fmt.Errorf("get message failed. %w", err) + } + + m.logger.Info("message queue token is expired during GetNextMessage, refreshing...") + session, err := m.client.RefreshMessageSession(ctx, m.session.RunnerScaleSet.Id, m.session.SessionId) + if err != nil { + return nil, fmt.Errorf("refresh message session failed. 
%w", err) + } + + m.session = session + message, err = m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId) + if err != nil { + return nil, fmt.Errorf("get message failed after refresh message session. %w", err) + } + + return message, nil +} + +func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId int64) error { + err := m.client.DeleteMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, messageId) + if err == nil { + return nil + } + + expiredError := &actions.MessageQueueTokenExpiredError{} + if !errors.As(err, &expiredError) { + return fmt.Errorf("delete message failed. %w", err) + } + + m.logger.Info("message queue token is expired during DeleteMessage, refreshing...") + session, err := m.client.RefreshMessageSession(ctx, m.session.RunnerScaleSet.Id, m.session.SessionId) + if err != nil { + return fmt.Errorf("refresh message session failed. %w", err) + } + + m.session = session + err = m.client.DeleteMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, messageId) + if err != nil { + return fmt.Errorf("delete message failed after refresh message session. %w", err) + } + + return nil + +} + +func (m *SessionRefreshingClient) AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) { + ids, err := m.client.AcquireJobs(ctx, m.session.RunnerScaleSet.Id, m.session.MessageQueueAccessToken, requestIds) + if err == nil { + return ids, nil + } + + expiredError := &actions.MessageQueueTokenExpiredError{} + if !errors.As(err, &expiredError) { + return nil, fmt.Errorf("acquire jobs failed. %w", err) + } + + m.logger.Info("message queue token is expired during AcquireJobs, refreshing...") + session, err := m.client.RefreshMessageSession(ctx, m.session.RunnerScaleSet.Id, m.session.SessionId) + if err != nil { + return nil, fmt.Errorf("refresh message session failed. 
%w", err) + } + + m.session = session + ids, err = m.client.AcquireJobs(ctx, m.session.RunnerScaleSet.Id, m.session.MessageQueueAccessToken, requestIds) + if err != nil { + return nil, fmt.Errorf("acquire jobs failed after refresh message session. %w", err) + } + + return ids, nil +} + +func (m *SessionRefreshingClient) Close() error { + if m.session == nil { + m.logger.Info("session is already deleted. (no-op)") + return nil + } + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + m.logger.Info("deleting session.") + err := m.client.DeleteMessageSession(ctxWithTimeout, m.session.RunnerScaleSet.Id, m.session.SessionId) + if err != nil { + return fmt.Errorf("delete message session failed. %w", err) + } + + m.session = nil + return nil +} diff --git a/cmd/githubrunnerscalesetlistener/sessionrefreshingclient_test.go b/cmd/githubrunnerscalesetlistener/sessionrefreshingclient_test.go new file mode 100644 index 0000000000..1423a0ce70 --- /dev/null +++ b/cmd/githubrunnerscalesetlistener/sessionrefreshingclient_test.go @@ -0,0 +1,421 @@ +package main + +import ( + "context" + "fmt" + "testing" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/actions/actions-runner-controller/logging" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestGetMessage(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + + 
mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, nil).Once() + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(&actions.RunnerScaleSetMessage{MessageId: 1}, nil).Once() + + client := newSessionClient(mockActionsClient, &logger, session) + + msg, err := client.GetMessage(ctx, 0) + require.NoError(t, err, "GetMessage should not return an error") + + assert.Nil(t, msg, "GetMessage should return nil message") + + msg, err = client.GetMessage(ctx, 0) + require.NoError(t, err, "GetMessage should not return an error") + + assert.Equal(t, int64(1), msg.MessageId, "GetMessage should return a message with id 1") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made") +} + +func TestDeleteMessage(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + + mockActionsClient.On("DeleteMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(1)).Return(nil).Once() + + client := newSessionClient(mockActionsClient, &logger, session) + + err := client.DeleteMessage(ctx, int64(1)) + assert.NoError(t, err, "DeleteMessage should not return an error") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made") +} + +func TestAcquireJobs(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := 
logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + mockActionsClient.On("AcquireJobs", ctx, mock.Anything, "token", mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return([]int64{1}, nil) + + client := newSessionClient(mockActionsClient, &logger, session) + + ids, err := client.AcquireJobs(ctx, []int64{1, 2, 3}) + assert.NoError(t, err, "AcquireJobs should not return an error") + assert.Equal(t, []int64{1}, ids, "AcquireJobs should return a slice with one id") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made") +} + +func TestClose(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + + mockActionsClient.On("DeleteMessageSession", mock.Anything, 1, &sessionId).Return(nil).Once() + + client := newSessionClient(mockActionsClient, &logger, session) + + err := client.Close() + assert.NoError(t, err, "DeleteMessageSession should not return an error") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made") +} + +func TestGetMessage_Error(t *testing.T) { + 
mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, fmt.Errorf("error")).Once() + + client := newSessionClient(mockActionsClient, &logger, session) + + msg, err := client.GetMessage(ctx, 0) + assert.ErrorContains(t, err, "get message failed. error", "GetMessage should return an error") + assert.Nil(t, msg, "GetMessage should return nil message") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made") +} + +func TestDeleteMessage_SessionError(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + + mockActionsClient.On("DeleteMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(1)).Return(fmt.Errorf("error")).Once() + + client := newSessionClient(mockActionsClient, &logger, session) + + err := client.DeleteMessage(ctx, int64(1)) + assert.ErrorContains(t, err, "delete message failed. 
error", "DeleteMessage should return an error") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made") +} + +func TestAcquireJobs_Error(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + mockActionsClient.On("AcquireJobs", ctx, mock.Anything, "token", mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return(nil, fmt.Errorf("error")).Once() + + client := newSessionClient(mockActionsClient, &logger, session) + + ids, err := client.AcquireJobs(ctx, []int64{1, 2, 3}) + assert.ErrorContains(t, err, "acquire jobs failed. 
error", "AcquireJobs should return an error") + assert.Nil(t, ids, "AcquireJobs should return nil ids") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made") +} + +func TestGetMessage_RefreshToken(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once() + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, "token2", int64(0)).Return(&actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "test", + Body: "test", + }, nil).Once() + mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(&actions.RunnerScaleSetSession{ + SessionId: &sessionId, + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token2", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + }, nil).Once() + + client := newSessionClient(mockActionsClient, &logger, session) + msg, err := client.GetMessage(ctx, 0) + assert.NoError(t, err, "Error getting message") + assert.Equal(t, int64(1), msg.MessageId, "message id should be updated") + assert.Equal(t, "token2", client.session.MessageQueueAccessToken, "Message queue access token should be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestDeleteMessage_RefreshSessionToken(t *testing.T) { + 
mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + + mockActionsClient.On("DeleteMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(1)).Return(&actions.MessageQueueTokenExpiredError{}).Once() + mockActionsClient.On("DeleteMessage", ctx, session.MessageQueueUrl, "token2", int64(1)).Return(nil).Once() + mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(&actions.RunnerScaleSetSession{ + SessionId: &sessionId, + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token2", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + }, nil) + + client := newSessionClient(mockActionsClient, &logger, session) + err := client.DeleteMessage(ctx, 1) + assert.NoError(t, err, "Error delete message") + assert.Equal(t, "token2", client.session.MessageQueueAccessToken, "Message queue access token should be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestAcquireJobs_RefreshToken(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: 
&actions.RunnerScaleSet{ + Id: 1, + }, + } + + mockActionsClient.On("AcquireJobs", ctx, mock.Anything, session.MessageQueueAccessToken, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once() + mockActionsClient.On("AcquireJobs", ctx, mock.Anything, "token2", mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return([]int64{1, 2, 3}, nil) + mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(&actions.RunnerScaleSetSession{ + SessionId: &sessionId, + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token2", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + }, nil) + + client := newSessionClient(mockActionsClient, &logger, session) + ids, err := client.AcquireJobs(ctx, []int64{1, 2, 3}) + assert.NoError(t, err, "Error acquiring jobs") + assert.Equal(t, []int64{1, 2, 3}, ids, "Job ids should be returned") + assert.Equal(t, "token2", client.session.MessageQueueAccessToken, "Message queue access token should be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestGetMessage_RefreshToken_Failed(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once() + 
mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(nil, fmt.Errorf("error")) + + client := newSessionClient(mockActionsClient, &logger, session) + msg, err := client.GetMessage(ctx, 0) + assert.ErrorContains(t, err, "refresh message session failed. error", "Error should be returned") + assert.Nil(t, msg, "Message should be nil") + assert.Equal(t, "token", client.session.MessageQueueAccessToken, "Message queue access token should not be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestDeleteMessage_RefreshToken_Failed(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + mockActionsClient.On("DeleteMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(1)).Return(&actions.MessageQueueTokenExpiredError{}).Once() + mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(nil, fmt.Errorf("error")) + + client := newSessionClient(mockActionsClient, &logger, session) + err := client.DeleteMessage(ctx, 1) + + assert.ErrorContains(t, err, "refresh message session failed. 
error", "Error getting message") + assert.Equal(t, "token", client.session.MessageQueueAccessToken, "Message queue access token should not be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestAcquireJobs_RefreshToken_Failed(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + ctx := context.Background() + sessionId := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &sessionId, + OwnerName: "owner", + MessageQueueUrl: "https://github.com", + MessageQueueAccessToken: "token", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + }, + } + + mockActionsClient.On("AcquireJobs", ctx, mock.Anything, session.MessageQueueAccessToken, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once() + mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(nil, fmt.Errorf("error")) + + client := newSessionClient(mockActionsClient, &logger, session) + ids, err := client.AcquireJobs(ctx, []int64{1, 2, 3}) + assert.ErrorContains(t, err, "refresh message session failed. 
error", "Expect error refreshing message session") + assert.Nil(t, ids, "Job ids should be nil") + assert.Equal(t, "token", client.session.MessageQueueAccessToken, "Message queue access token should not be updated") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} + +func TestClose_Skip(t *testing.T) { + mockActionsClient := &actions.MockActionsService{} + logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + logger = logger.WithName(t.Name()) + require.NoError(t, log_err, "Error creating logger") + + client := newSessionClient(mockActionsClient, &logger, nil) + err := client.Close() + require.NoError(t, err, "Error closing session client") + assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met") +} diff --git a/config/crd/bases/actions.github.com_autoscalinglisteners.yaml b/config/crd/bases/actions.github.com_autoscalinglisteners.yaml new file mode 100644 index 0000000000..18946cb318 --- /dev/null +++ b/config/crd/bases/actions.github.com_autoscalinglisteners.yaml @@ -0,0 +1,97 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + name: autoscalinglisteners.actions.github.com +spec: + group: actions.github.com + names: + kind: AutoscalingListener + listKind: AutoscalingListenerList + plural: autoscalinglisteners + singular: autoscalinglistener + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.githubConfigUrl + name: GitHub Configure URL + type: string + - jsonPath: .spec.autoscalingRunnerSetNamespace + name: AutoscalingRunnerSet Namespace + type: string + - jsonPath: .spec.autoscalingRunnerSetName + name: AutoscalingRunnerSet Name + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutoscalingListener is the Schema for the autoscalinglisteners API + properties: + apiVersion: + 
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutoscalingListenerSpec defines the desired state of AutoscalingListener + properties: + autoscalingRunnerSetName: + description: Required + type: string + autoscalingRunnerSetNamespace: + description: Required + type: string + ephemeralRunnerSetName: + description: Required + type: string + githubConfigSecret: + description: Required + type: string + githubConfigUrl: + description: Required + type: string + image: + description: Required + type: string + imagePullSecrets: + description: Required + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + type: array + maxRunners: + description: Required + minimum: 0 + type: integer + minRunners: + description: Required + minimum: 0 + type: integer + runnerScaleSetId: + description: Required + type: integer + type: object + status: + description: AutoscalingListenerStatus defines the observed state of AutoscalingListener + type: object + type: object + served: true + storage: true + subresources: + status: {} + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml new file mode 100644 index 0000000000..9d60edaa99 --- /dev/null +++ b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml @@ -0,0 +1,4218 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + name: autoscalingrunnersets.actions.github.com +spec: + group: actions.github.com + names: + kind: AutoscalingRunnerSet + listKind: AutoscalingRunnerSetList + plural: autoscalingrunnersets + singular: autoscalingrunnerset + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.minRunners + name: Minimum Runners + type: number + - jsonPath: .spec.maxRunners + name: Maximum Runners + type: number + - jsonPath: .status.currentRunners + name: Current Runners + type: number + - jsonPath: .status.state + name: State + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutoscalingRunnerSet is the Schema for the autoscalingrunnersets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutoscalingRunnerSetSpec defines the desired state of AutoscalingRunnerSet + properties: + githubConfigSecret: + description: Required + type: string + githubConfigUrl: + description: Required + type: string + githubServerTLS: + properties: + certConfigMapRef: + description: Required + type: string + type: object + maxRunners: + minimum: 0 + type: integer + minRunners: + minimum: 0 + type: integer + proxy: + properties: + http: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + https: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + type: object + runnerGroup: + type: string + template: + description: Required + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: 'Specification of the desired behavior of the pod. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + activeDeadlineSeconds: + description: Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + type: boolean + containers: + description: List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. 
+ properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. 
+ items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. 
This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. 
+ type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. 
When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. 
+ properties: + nameservers: + description: A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: 'EnableServiceLinks indicates whether information about services should be injected into pod''s environment variables, matching the syntax of Docker links. Optional: Defaults to true.' + type: boolean + ephemeralContainers: + description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. 
+ items: + description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted." + properties: + args: + description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral containers. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.' 
+ properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. 
If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. 
All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + targetContainerName: + description: "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. \n The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined." + type: string + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. 
+ type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. + items: + description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: 'Use the host''s ipc namespace. Optional: Default to false.' + type: boolean + hostNetwork: + description: Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. + type: boolean + hostPID: + description: 'Use the host''s pid namespace. Optional: Default to false.' + type: boolean + hostUsers: + description: 'Use the host''s user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.' + type: boolean + hostname: + description: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. 
+ type: string + imagePullSecrets: + description: 'ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. 
RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. 
If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. 
You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + x-kubernetes-map-type: atomic + os: + description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
\n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" + properties: + name: + description: 'Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null' + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. 
If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md' + type: object + preemptionPolicy: + description: PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. + type: string + readinessGates: + description: 'If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates' + items: + description: PodReadinessGate contains the reference to a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + runtimeClassName: + description: 'RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class' + type: string + schedulerName: + description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. + type: string + securityContext: + description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' + properties: + fsGroup: + description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. 
+ items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: 'DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.' + type: string + serviceAccountName: + description: 'ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/' + type: string + setHostnameAsFQDN: + description: If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. + type: boolean + shareProcessNamespace: + description: 'Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.' + type: boolean + subdomain: + description: If specified, the fully qualified Pod hostname will be "...svc.". If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. 
The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. 
And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. 
\n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: 'List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes' + items: + description: Volume represents a named volume in a pod that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. 
If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". 
If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. \n Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. 
\n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. 
This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. 
+ type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with other supported volume types + properties: + configMap: + description: configMap information about the configMap data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional field specify whether the Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the mount point of the file to project the token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no group + type: string + readOnly: + description: readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system as configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + type: object + status: + description: AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet + properties: + currentRunners: + type: integer + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/actions.github.com_ephemeralrunners.yaml b/config/crd/bases/actions.github.com_ephemeralrunners.yaml new file mode 100644 index 0000000000..4712d7662f --- /dev/null +++ b/config/crd/bases/actions.github.com_ephemeralrunners.yaml @@ -0,0 +1,4249 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + name: ephemeralrunners.actions.github.com +spec: + group: actions.github.com + names: + kind: EphemeralRunner + listKind: EphemeralRunnerList + plural: ephemeralrunners + singular: ephemeralrunner + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.githubConfigUrl + name: GitHub Config URL + type: string + - jsonPath: .status.runnerId + name: RunnerId + type: number + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .status.jobRepositoryName + name: JobRepository + type: string + - jsonPath: .status.jobWorkflowRef + name: JobWorkflowRef + type: string + - jsonPath: .status.workflowRunId + name: WorkflowRunId + type: number + - jsonPath: .status.jobDisplayName + name: JobDisplayName + type: string + - jsonPath: .status.message + name: Message + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date 
+ name: v1alpha1 + schema: + openAPIV3Schema: + description: EphemeralRunner is the Schema for the ephemeralrunners API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: EphemeralRunnerSpec defines the desired state of EphemeralRunner + properties: + githubConfigSecret: + type: string + githubConfigUrl: + type: string + githubServerTLS: + properties: + certConfigMapRef: + description: Required + type: string + type: object + metadata: + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + proxy: + properties: + http: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + https: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + type: object + runnerScaleSetId: + type: integer + spec: + description: 'Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + activeDeadlineSeconds: + description: Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + type: boolean + containers: + description: List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. 
One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. 
This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. 
This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. 
Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". 
+ type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. 
+ properties: + nameservers: + description: A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: 'EnableServiceLinks indicates whether information about services should be injected into pod''s environment variables, matching the syntax of Docker links. Optional: Defaults to true.' + type: boolean + ephemeralContainers: + description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. 
+ items: + description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted." + properties: + args: + description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral containers. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.' 
+ properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. 
If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. 
All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + targetContainerName: + description: "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. \n The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined." + type: string + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. 
+ type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. + items: + description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: 'Use the host''s ipc namespace. Optional: Default to false.' + type: boolean + hostNetwork: + description: Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. + type: boolean + hostPID: + description: 'Use the host''s pid namespace. Optional: Default to false.' + type: boolean + hostUsers: + description: 'Use the host''s user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.' + type: boolean + hostname: + description: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. 
+ type: string + imagePullSecrets: + description: 'ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. 
RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. 
If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. 
You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + x-kubernetes-map-type: atomic + os: + description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
\n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" + properties: + name: + description: 'Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null' + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. 
If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md' + type: object + preemptionPolicy: + description: PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. + type: string + readinessGates: + description: 'If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates' + items: + description: PodReadinessGate contains the reference to a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + runtimeClassName: + description: 'RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class' + type: string + schedulerName: + description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. + type: string + securityContext: + description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' + properties: + fsGroup: + description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. 
+ items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: 'DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.' + type: string + serviceAccountName: + description: 'ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/' + type: string + setHostnameAsFQDN: + description: If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. + type: boolean + shareProcessNamespace: + description: 'Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.' + type: boolean + subdomain: + description: If specified, the fully qualified Pod hostname will be "...svc.". If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. 
The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. 
And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. 
\n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: 'List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes' + items: + description: Volume represents a named volume in a pod that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. 
If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". 
If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. \n Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. 
\n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. 
This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. 
+ type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with other supported volume types + properties: + configMap: + description: configMap information about the configMap data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional field specify whether the Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the mount point of the file to project the token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no group + type: string + readOnly: + description: readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system as configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + status: + description: EphemeralRunnerStatus defines the observed state of EphemeralRunner + properties: + failures: + additionalProperties: + type: boolean + type: object + jobDisplayName: + type: string + jobRepositoryName: + type: string + jobRequestId: + format: int64 + type: integer + jobWorkflowRef: + type: string + message: + type: string + phase: + description: "Phase describes phases where EphemeralRunner can be in. The underlying type is a PodPhase, but the meaning is more restrictive \n The PodFailed phase should be set only when EphemeralRunner fails to start after multiple retries. That signals that this EphemeralRunner won't work, and manual inspection is required \n The PodSucceded phase should be set only when confirmed that EphemeralRunner actually executed the job and has been removed from the service." + type: string + ready: + description: Turns true only if the runner is online. 
+ type: boolean + reason: + type: string + runnerId: + type: integer + runnerJITConfig: + type: string + runnerName: + type: string + workflowRunId: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml b/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml new file mode 100644 index 0000000000..913aee5dc2 --- /dev/null +++ b/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml @@ -0,0 +1,4206 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + name: ephemeralrunnersets.actions.github.com +spec: + group: actions.github.com + names: + kind: EphemeralRunnerSet + listKind: EphemeralRunnerSetList + plural: ephemeralrunnersets + singular: ephemeralrunnerset + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.replicas + name: DesiredReplicas + type: integer + - jsonPath: .status.currentReplicas + name: CurrentReplicas + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: EphemeralRunnerSet is the Schema for the ephemeralrunnersets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: EphemeralRunnerSetSpec defines the desired state of EphemeralRunnerSet + properties: + ephemeralRunnerSpec: + description: EphemeralRunnerSpec defines the desired state of EphemeralRunner + properties: + githubConfigSecret: + type: string + githubConfigUrl: + type: string + githubServerTLS: + properties: + certConfigMapRef: + description: Required + type: string + type: object + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + proxy: + properties: + http: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + https: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + type: object + runnerScaleSetId: + type: integer + spec: + description: 'Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + activeDeadlineSeconds: + description: Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. 
+ format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. 
+ items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + type: boolean + containers: + description: List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. 
The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. 
One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. 
This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. 
This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. 
Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". 
+ type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. 
+ properties: + nameservers: + description: A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: 'EnableServiceLinks indicates whether information about services should be injected into pod''s environment variables, matching the syntax of Docker links. Optional: Defaults to true.' + type: boolean + ephemeralContainers: + description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. 
+ items: + description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted." + properties: + args: + description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral containers. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.' 
+ properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. 
If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. 
All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false + type: boolean + targetContainerName: + description: "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. \n The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined." + type: string + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. 
+ type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. + items: + description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: 'Use the host''s ipc namespace. Optional: Default to false.' + type: boolean + hostNetwork: + description: Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. + type: boolean + hostPID: + description: 'Use the host''s pid namespace. Optional: Default to false.' + type: boolean + hostUsers: + description: 'Use the host''s user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.' + type: boolean + hostname: + description: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. 
+ type: string + imagePullSecrets: + description: 'ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. 
RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. 
If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. 
You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + x-kubernetes-map-type: atomic + os: + description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
\n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" + properties: + name: + description: 'Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null' + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. 
If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md' + type: object + preemptionPolicy: + description: PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. + type: string + readinessGates: + description: 'If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates' + items: + description: PodReadinessGate contains the reference to a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + runtimeClassName: + description: 'RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class' + type: string + schedulerName: + description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. + type: string + securityContext: + description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' + properties: + fsGroup: + description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. 
+ items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: 'DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.' + type: string + serviceAccountName: + description: 'ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/' + type: string + setHostnameAsFQDN: + description: If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. + type: boolean + shareProcessNamespace: + description: 'Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.' + type: boolean + subdomain: + description: If specified, the fully qualified Pod hostname will be "...svc.". If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. 
The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. 
And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. 
\n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: 'List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes' + items: + description: Volume represents a named volume in a pod that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. 
If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". 
If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. \n Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. 
\n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. 
This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. 
+ type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with other supported volume types + properties: + configMap: + description: configMap information about the configMap data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional field specify whether the Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the mount point of the file to project the token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no group + type: string + readOnly: + description: readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system as configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + replicas: + description: Replicas is the number of desired EphemeralRunner resources in the k8s namespace. + type: integer + type: object + status: + description: EphemeralRunnerSetStatus defines the observed state of EphemeralRunnerSet + properties: + currentReplicas: + description: CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet. + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index a7cf6d6dfe..428ad3a22c 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -7,6 +7,10 @@ resources: - bases/actions.summerwind.dev_runnerdeployments.yaml - bases/actions.summerwind.dev_horizontalrunnerautoscalers.yaml - bases/actions.summerwind.dev_runnersets.yaml +- bases/actions.github.com_autoscalingrunnersets.yaml +- bases/actions.github.com_ephemeralrunners.yaml +- bases/actions.github.com_ephemeralrunnersets.yaml +- bases/actions.github.com_autoscalinglisteners.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 1fb166ecd9..d16d16987a 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -50,6 +50,14 @@ spec: optional: true - name: GITHUB_APP_PRIVATE_KEY value: /etc/actions-runner-controller/github_app_private_key + - name: CONTROLLER_MANAGER_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: CONTROLLER_MANAGER_POD_NAMESPACE + 
valueFrom: + fieldRef: + fieldPath: metadata.namespace volumeMounts: - name: controller-manager mountPath: "/etc/actions-runner-controller" diff --git a/config/rbac/autoscalinglistener_editor_role.yaml b/config/rbac/autoscalinglistener_editor_role.yaml new file mode 100644 index 0000000000..b89881efea --- /dev/null +++ b/config/rbac/autoscalinglistener_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit autoscalinglisteners. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: autoscalinglistener-editor-role +rules: +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners/status + verbs: + - get diff --git a/config/rbac/autoscalinglistener_viewer_role.yaml b/config/rbac/autoscalinglistener_viewer_role.yaml new file mode 100644 index 0000000000..4a831665c1 --- /dev/null +++ b/config/rbac/autoscalinglistener_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view autoscalinglisteners. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: autoscalinglistener-viewer-role +rules: +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners + verbs: + - get + - list + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners/status + verbs: + - get diff --git a/config/rbac/autoscalingrunnerset_editor_role.yaml b/config/rbac/autoscalingrunnerset_editor_role.yaml new file mode 100644 index 0000000000..9a1abf507e --- /dev/null +++ b/config/rbac/autoscalingrunnerset_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit autoscalingrunnersets. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: autoscalingrunnerset-editor-role +rules: +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets/status + verbs: + - get diff --git a/config/rbac/autoscalingrunnerset_viewer_role.yaml b/config/rbac/autoscalingrunnerset_viewer_role.yaml new file mode 100644 index 0000000000..5f15149fd4 --- /dev/null +++ b/config/rbac/autoscalingrunnerset_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view autoscalingrunnersets. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: autoscalingrunnerset-viewer-role +rules: +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets + verbs: + - get + - list + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets/status + verbs: + - get diff --git a/config/rbac/ephemeralrunner_editor_role.yaml b/config/rbac/ephemeralrunner_editor_role.yaml new file mode 100644 index 0000000000..b22bee8ebe --- /dev/null +++ b/config/rbac/ephemeralrunner_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit ephemeralrunners. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ephemeralrunner-editor-role +rules: +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners/status + verbs: + - get diff --git a/config/rbac/ephemeralrunner_viewer_role.yaml b/config/rbac/ephemeralrunner_viewer_role.yaml new file mode 100644 index 0000000000..5f5dab671b --- /dev/null +++ b/config/rbac/ephemeralrunner_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view ephemeralrunners. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ephemeralrunner-viewer-role +rules: +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners + verbs: + - get + - list + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners/status + verbs: + - get diff --git a/config/rbac/ephemeralrunnerset_editor_role.yaml b/config/rbac/ephemeralrunnerset_editor_role.yaml new file mode 100644 index 0000000000..e02d20eeec --- /dev/null +++ b/config/rbac/ephemeralrunnerset_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit ephemeralrunnersets. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ephemeralrunnerset-editor-role +rules: +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets/status + verbs: + - get diff --git a/config/rbac/ephemeralrunnerset_viewer_role.yaml b/config/rbac/ephemeralrunnerset_viewer_role.yaml new file mode 100644 index 0000000000..56913a9cfd --- /dev/null +++ b/config/rbac/ephemeralrunnerset_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view ephemeralrunnersets. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ephemeralrunnerset-viewer-role +rules: +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets + verbs: + - get + - list + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 37c567d538..29ab888173 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -6,6 +6,110 @@ metadata: creationTimestamp: null name: manager-role rules: +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners/finalizers + verbs: + - update +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets/finalizers + verbs: + - update +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets/status + verbs: + - get + 
- patch + - update - apiGroups: - actions.summerwind.dev resources: @@ -202,6 +306,26 @@ rules: verbs: - create - patch +- apiGroups: + - "" + resources: + - namespaces + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces/status + - pods/status + verbs: + - get - apiGroups: - "" resources: @@ -249,14 +373,22 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get - apiGroups: - "" resources: - secrets verbs: + - create - delete - get - list + - update - watch - apiGroups: - "" @@ -266,6 +398,8 @@ rules: - create - delete - get + - list + - watch - apiGroups: - rbac.authorization.k8s.io resources: @@ -274,6 +408,8 @@ rules: - create - delete - get + - list + - watch - apiGroups: - rbac.authorization.k8s.io resources: @@ -282,3 +418,6 @@ rules: - create - delete - get + - list + - update + - watch diff --git a/controllers/actions.github.com/.keep b/controllers/actions.github.com/.keep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/controllers/actions.github.com/autoscalinglistener_controller.go b/controllers/actions.github.com/autoscalinglistener_controller.go new file mode 100644 index 0000000000..4078278f01 --- /dev/null +++ b/controllers/actions.github.com/autoscalinglistener_controller.go @@ -0,0 +1,450 @@ +/* +Copyright 2020 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package actionsgithubcom + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + hash "github.com/actions/actions-runner-controller/hash" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + autoscalingListenerOwnerKey = ".metadata.controller" + autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer" +) + +// AutoscalingListenerReconciler reconciles a AutoscalingListener object +type AutoscalingListenerReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + + resourceBuilder resourceBuilder +} + +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=pods/status,verbs=get +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update +// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;delete;get;list;watch;update +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;delete;get;list;watch +// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalinglisteners,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalinglisteners/status,verbs=get;update;patch +// 
+kubebuilder:rbac:groups=actions.github.com,resources=autoscalinglisteners/finalizers,verbs=update + +// Reconcile a AutoscalingListener resource to meet its desired spec. +func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("autoscalinglistener", req.NamespacedName) + + autoscalingListener := new(v1alpha1.AutoscalingListener) + if err := r.Get(ctx, req.NamespacedName, autoscalingListener); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if !autoscalingListener.ObjectMeta.DeletionTimestamp.IsZero() { + if controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) { + log.Info("Deleting resources") + done, err := r.cleanupResources(ctx, autoscalingListener, log) + if err != nil { + log.Error(err, "Failed to cleanup resources after deletion") + return ctrl.Result{}, err + } + if !done { + log.Info("Waiting for resources to be deleted before removing finalizer") + return ctrl.Result{}, nil + } + + log.Info("Removing finalizer") + err = patch(ctx, r.Client, autoscalingListener, func(obj *v1alpha1.AutoscalingListener) { + controllerutil.RemoveFinalizer(obj, autoscalingListenerFinalizerName) + }) + if err != nil && !kerrors.IsNotFound(err) { + log.Error(err, "Failed to remove finalizer") + return ctrl.Result{}, err + } + + log.Info("Successfully removed finalizer after cleanup") + } + return ctrl.Result{}, nil + } + + if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) { + log.Info("Adding finalizer") + if err := patch(ctx, r.Client, autoscalingListener, func(obj *v1alpha1.AutoscalingListener) { + controllerutil.AddFinalizer(obj, autoscalingListenerFinalizerName) + }); err != nil { + log.Error(err, "Failed to add finalizer") + return ctrl.Result{}, err + } + + log.Info("Successfully added finalizer") + return ctrl.Result{}, nil + } + + // Check if the AutoscalingRunnerSet exists + var 
autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet + if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Spec.AutoscalingRunnerSetName}, &autoscalingRunnerSet); err != nil { + log.Error(err, "Failed to find AutoscalingRunnerSet.", + "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + "name", autoscalingListener.Spec.AutoscalingRunnerSetName) + return ctrl.Result{}, err + } + + // Check if the GitHub config secret exists + secret := new(corev1.Secret) + if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Spec.GitHubConfigSecret}, secret); err != nil { + log.Error(err, "Failed to find GitHub config secret.", + "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + "name", autoscalingListener.Spec.GitHubConfigSecret) + return ctrl.Result{}, err + } + + // Create a mirror secret in the same namespace as the AutoscalingListener + mirrorSecret := new(corev1.Secret) + if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerSecretMirrorName(autoscalingListener)}, mirrorSecret); err != nil { + if !kerrors.IsNotFound(err) { + log.Error(err, "Unable to get listener secret mirror", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerSecretMirrorName(autoscalingListener)) + return ctrl.Result{}, err + } + + // Create a mirror secret for the listener pod in the Controller namespace for listener pod to use + log.Info("Creating a mirror listener secret for the listener pod") + return r.createSecretsForListener(ctx, autoscalingListener, secret, log) + } + + // make sure the mirror secret is up to date + mirrorSecretDataHash := mirrorSecret.Labels["secret-data-hash"] + secretDataHash := hash.ComputeTemplateHash(secret.Data) + if mirrorSecretDataHash != secretDataHash { + log.Info("Updating mirror listener secret for the 
listener pod", "mirrorSecretDataHash", mirrorSecretDataHash, "secretDataHash", secretDataHash) + return r.updateSecretsForListener(ctx, secret, mirrorSecret, log) + } + + // Make sure the runner scale set listener service account is created for the listener pod in the controller namespace + serviceAccount := new(corev1.ServiceAccount) + if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerServiceAccountName(autoscalingListener)}, serviceAccount); err != nil { + if !kerrors.IsNotFound(err) { + log.Error(err, "Unable to get listener service accounts", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerServiceAccountName(autoscalingListener)) + return ctrl.Result{}, err + } + + // Create a service account for the listener pod in the controller namespace + log.Info("Creating a service account for the listener pod") + return r.createServiceAccountForListener(ctx, autoscalingListener, log) + } + + // TODO: make sure the service account is up to date + + // Make sure the runner scale set listener role is created in the AutoscalingRunnerSet namespace + listenerRole := new(rbacv1.Role) + if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole); err != nil { + if !kerrors.IsNotFound(err) { + log.Error(err, "Unable to get listener role", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", scaleSetListenerRoleName(autoscalingListener)) + return ctrl.Result{}, err + } + + // Create a role for the listener pod in the AutoScalingRunnerSet namespace + log.Info("Creating a role for the listener pod") + return r.createRoleForListener(ctx, autoscalingListener, log) + } + + // Make sure the listener role has the up-to-date rules + existingRuleHash := listenerRole.Labels["role-policy-rules-hash"] + desiredRules := 
rulesForListenerRole([]string{autoscalingListener.Spec.EphemeralRunnerSetName}) + desiredRulesHash := hash.ComputeTemplateHash(&desiredRules) + if existingRuleHash != desiredRulesHash { + log.Info("Updating the listener role with the up-to-date rules") + return r.updateRoleForListener(ctx, listenerRole, desiredRules, desiredRulesHash, log) + } + + // Make sure the runner scale set listener role binding is created + listenerRoleBinding := new(rbacv1.RoleBinding) + if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding); err != nil { + if !kerrors.IsNotFound(err) { + log.Error(err, "Unable to get listener role binding", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", scaleSetListenerRoleName(autoscalingListener)) + return ctrl.Result{}, err + } + + // Create a role binding for the listener pod in the AutoScalingRunnerSet namespace + log.Info("Creating a role binding for the service account and role") + return r.createRoleBindingForListener(ctx, autoscalingListener, listenerRole, serviceAccount, log) + } + + // TODO: make sure the role binding has the up-to-date role and service account + + listenerPod := new(corev1.Pod) + if err := r.Get(ctx, client.ObjectKey{Namespace: autoscalingListener.Namespace, Name: autoscalingListener.Name}, listenerPod); err != nil { + if !kerrors.IsNotFound(err) { + log.Error(err, "Unable to get listener pod", "namespace", autoscalingListener.Namespace, "name", autoscalingListener.Name) + return ctrl.Result{}, err + } + + // Create a listener pod in the controller namespace + log.Info("Creating a listener pod") + return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log) + } + + // The listener pod failed might mean the mirror secret is out of date + // Delete the listener pod and re-create it to make sure the mirror secret is up to 
date + if listenerPod.Status.Phase == corev1.PodFailed && listenerPod.DeletionTimestamp.IsZero() { + log.Info("Listener pod failed, deleting it and re-creating it", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", listenerPod.Status.Reason, "message", listenerPod.Status.Message) + if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) { + log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name) + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error { + groupVersionIndexer := func(rawObj client.Object) []string { + groupVersion := v1alpha1.GroupVersion.String() + owner := metav1.GetControllerOf(rawObj) + if owner == nil { + return nil + } + + // ...make sure it is owned by this controller + if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" { + return nil + } + + // ...and if so, return it + return []string{owner.Name} + } + + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil { + return err + } + + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil { + return err + } + + labelBasedWatchFunc := func(obj client.Object) []reconcile.Request { + var requests []reconcile.Request + labels := obj.GetLabels() + namespace, ok := labels["auto-scaling-listener-namespace"] + if !ok { + return nil + } + + name, ok := labels["auto-scaling-listener-name"] + if !ok { + return nil + } + requests = append(requests, + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: name, + Namespace: namespace, + }, + }, + ) + return requests + } + + return ctrl.NewControllerManagedBy(mgr). 
+ For(&v1alpha1.AutoscalingListener{}). + Owns(&corev1.Pod{}). + Owns(&corev1.ServiceAccount{}). + Owns(&corev1.Secret{}). + Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)). + Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)). + WithEventFilter(predicate.ResourceVersionChangedPredicate{}). + Complete(r) +} + +func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) { + logger.Info("Cleaning up the listener pod") + listenerPod := new(corev1.Pod) + err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod) + switch { + case err == nil: + if listenerPod.ObjectMeta.DeletionTimestamp.IsZero() { + logger.Info("Deleting the listener pod") + if err := r.Delete(ctx, listenerPod); err != nil { + return false, fmt.Errorf("failed to delete listener pod: %v", err) + } + } + return false, nil + case err != nil && !kerrors.IsNotFound(err): + return false, fmt.Errorf("failed to get listener pods: %v", err) + } + logger.Info("Listener pod is deleted") + + logger.Info("Cleaning up the listener service account") + listenerSa := new(corev1.ServiceAccount) + err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa) + switch { + case err == nil: + if listenerSa.ObjectMeta.DeletionTimestamp.IsZero() { + logger.Info("Deleting the listener service account") + if err := r.Delete(ctx, listenerSa); err != nil { + return false, fmt.Errorf("failed to delete listener service account: %v", err) + } + } + return false, nil + case err != nil && !kerrors.IsNotFound(err): + return false, fmt.Errorf("failed to get listener service account: %v", err) + } + logger.Info("Listener service account is deleted") + + return 
true, nil +} + +func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) { + newServiceAccount := r.resourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener) + + if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil { + return ctrl.Result{}, err + } + + logger.Info("Creating listener service accounts", "namespace", newServiceAccount.Namespace, "name", newServiceAccount.Name) + if err := r.Create(ctx, newServiceAccount); err != nil { + logger.Error(err, "Unable to create listener service accounts", "namespace", newServiceAccount.Namespace, "name", newServiceAccount.Name) + return ctrl.Result{}, err + } + + logger.Info("Created listener service accounts", "namespace", newServiceAccount.Namespace, "name", newServiceAccount.Name) + return ctrl.Result{}, nil +} + +func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) { + newPod := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret) + + if err := ctrl.SetControllerReference(autoscalingListener, newPod, r.Scheme); err != nil { + return ctrl.Result{}, err + } + + logger.Info("Creating listener pod", "namespace", newPod.Namespace, "name", newPod.Name) + if err := r.Create(ctx, newPod); err != nil { + logger.Error(err, "Unable to create listener pod", "namespace", newPod.Namespace, "name", newPod.Name) + return ctrl.Result{}, err + } + + logger.Info("Created listener pod", "namespace", newPod.Namespace, "name", newPod.Name) + return ctrl.Result{}, nil +} + +func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener 
*v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) { + newListenerSecret := r.resourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret) + + if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil { + return ctrl.Result{}, err + } + + logger.Info("Creating listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name) + if err := r.Create(ctx, newListenerSecret); err != nil { + logger.Error(err, "Unable to create listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name) + return ctrl.Result{}, err + } + + logger.Info("Created listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name) + return ctrl.Result{}, nil +} + +func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) { + dataHash := hash.ComputeTemplateHash(secret.Data) + updatedMirrorSecret := mirrorSecret.DeepCopy() + updatedMirrorSecret.Labels["secret-data-hash"] = dataHash + updatedMirrorSecret.Data = secret.Data + + logger.Info("Updating listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash) + if err := r.Update(ctx, updatedMirrorSecret); err != nil { + logger.Error(err, "Unable to update listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name) + return ctrl.Result{}, err + } + + logger.Info("Updated listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash) + return ctrl.Result{}, nil +} + +func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) { + newRole := 
r.resourceBuilder.newScaleSetListenerRole(autoscalingListener) + + logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules) + if err := r.Create(ctx, newRole); err != nil { + logger.Error(err, "Unable to create listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules) + return ctrl.Result{}, err + } + + logger.Info("Created listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules) + return ctrl.Result{Requeue: true}, nil +} + +func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Context, listenerRole *rbacv1.Role, desiredRules []rbacv1.PolicyRule, desiredRulesHash string, logger logr.Logger) (ctrl.Result, error) { + updatedPatchRole := listenerRole.DeepCopy() + updatedPatchRole.Labels["role-policy-rules-hash"] = desiredRulesHash + updatedPatchRole.Rules = desiredRules + + logger.Info("Updating listener role in namespace to have the right permission", "namespace", updatedPatchRole.Namespace, "name", updatedPatchRole.Name, "oldRules", listenerRole.Rules, "newRules", updatedPatchRole.Rules) + if err := r.Update(ctx, updatedPatchRole); err != nil { + logger.Error(err, "Unable to update listener role", "namespace", updatedPatchRole.Namespace, "name", updatedPatchRole.Name, "rules", updatedPatchRole.Rules) + return ctrl.Result{}, err + } + + logger.Info("Updated listener role in namespace to have the right permission", "namespace", updatedPatchRole.Namespace, "name", updatedPatchRole.Name, "rules", updatedPatchRole.Rules) + return ctrl.Result{Requeue: true}, nil +} + +func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) { + newRoleBinding := r.resourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, 
serviceAccount) + + logger.Info("Creating listener role binding", + "namespace", newRoleBinding.Namespace, + "name", newRoleBinding.Name, + "role", listenerRole.Name, + "serviceAccountNamespace", serviceAccount.Namespace, + "serviceAccount", serviceAccount.Name) + if err := r.Create(ctx, newRoleBinding); err != nil { + logger.Error(err, "Unable to create listener role binding", + "namespace", newRoleBinding.Namespace, + "name", newRoleBinding.Name, + "role", listenerRole.Name, + "serviceAccountNamespace", serviceAccount.Namespace, + "serviceAccount", serviceAccount.Name) + return ctrl.Result{}, err + } + + logger.Info("Created listener role binding", + "namespace", newRoleBinding.Namespace, + "name", newRoleBinding.Name, + "role", listenerRole.Name, + "serviceAccountNamespace", serviceAccount.Namespace, + "serviceAccount", serviceAccount.Name) + return ctrl.Result{Requeue: true}, nil +} diff --git a/controllers/actions.github.com/autoscalinglistener_controller_test.go b/controllers/actions.github.com/autoscalinglistener_controller_test.go new file mode 100644 index 0000000000..839497cdc2 --- /dev/null +++ b/controllers/actions.github.com/autoscalinglistener_controller_test.go @@ -0,0 +1,393 @@ +package actionsgithubcom + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" +) + +const ( + autoScalingListenerTestTimeout = time.Second * 5 + autoScalingListenerTestInterval = time.Millisecond * 250 + autoScalingListenerTestGitHubToken = "gh_token" +) + +var _ = Describe("Test AutoScalingListener controller", func() { + var ctx context.Context + var cancel context.CancelFunc + autoScalingNS := new(corev1.Namespace) + autoScalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet) + configSecret := new(corev1.Secret) + autoScalingListener := new(actionsv1alpha1.AutoscalingListener) + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.TODO()) + autoScalingNS = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-listener" + RandStringRunes(5)}, + } + + err := k8sClient.Create(ctx, autoScalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") + + configSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: autoScalingNS.Name, + }, + Data: map[string][]byte{ + "github_token": []byte(autoScalingListenerTestGitHubToken), + }, + } + + err = k8sClient.Create(ctx, configSecret) + Expect(err).NotTo(HaveOccurred(), "failed to create config secret") + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Namespace: autoScalingNS.Name, + MetricsBindAddress: "0", + }) + Expect(err).NotTo(HaveOccurred(), "failed to create manager") + + controller := &AutoscalingListenerReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + } + err = controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + min := 1 + max := 10 + autoScalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoScalingNS.Name, + }, + Spec: 
actionsv1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + MaxRunners: &max, + MinRunners: &min, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err = k8sClient.Create(ctx, autoScalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + autoScalingListener = &actionsv1alpha1.AutoscalingListener{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asl", + Namespace: autoScalingNS.Name, + }, + Spec: actionsv1alpha1.AutoscalingListenerSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + RunnerScaleSetId: 1, + AutoscalingRunnerSetNamespace: autoScalingRunnerSet.Namespace, + AutoscalingRunnerSetName: autoScalingRunnerSet.Name, + EphemeralRunnerSetName: "test-ers", + MaxRunners: 10, + MinRunners: 1, + Image: "ghcr.io/owner/repo", + }, + } + + err = k8sClient.Create(ctx, autoScalingListener) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingListener") + + go func() { + defer GinkgoRecover() + + err := mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred(), "failed to start manager") + }() + }) + + AfterEach(func() { + defer cancel() + + err := k8sClient.Delete(ctx, autoScalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") + }) + + Context("When creating a new AutoScalingListener", func() { + It("It should create/add all required resources for a new AutoScalingListener (finalizer, secret, service account, role, rolebinding, pod)", func() { + // Check if finalizer is added + created := new(actionsv1alpha1.AutoscalingListener) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, created) + if err != nil { + return 
"", err + } + if len(created.Finalizers) == 0 { + return "", nil + } + return created.Finalizers[0], nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer") + + // Check if secret is created + mirrorSecret := new(corev1.Secret) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoScalingListener), Namespace: autoScalingListener.Namespace}, mirrorSecret) + if err != nil { + return "", err + } + return string(mirrorSecret.Data["github_token"]), nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListenerTestGitHubToken), "Mirror secret should be created") + + // Check if service account is created + serviceAccount := new(corev1.ServiceAccount) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerServiceAccountName(autoScalingListener), Namespace: autoScalingListener.Namespace}, serviceAccount) + if err != nil { + return "", err + } + return serviceAccount.Name, nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerServiceAccountName(autoScalingListener)), "Service account should be created") + + // Check if role is created + role := new(rbacv1.Role) + Eventually( + func() ([]rbacv1.PolicyRule, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoScalingListener), Namespace: autoScalingListener.Spec.AutoscalingRunnerSetNamespace}, role) + if err != nil { + return nil, err + } + + return role.Rules, nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{autoScalingListener.Spec.EphemeralRunnerSetName})), "Role should be created") + + // Check if rolebinding is created + roleBinding := 
new(rbacv1.RoleBinding) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoScalingListener), Namespace: autoScalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding) + if err != nil { + return "", err + } + + return roleBinding.RoleRef.Name, nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerRoleName(autoScalingListener)), "Rolebinding should be created") + + // Check if pod is created + pod := new(corev1.Pod) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod) + if err != nil { + return "", err + } + + return pod.Name, nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created") + }) + }) + + Context("When deleting a new AutoScalingListener", func() { + It("It should cleanup all resources for a deleting AutoScalingListener before removing it", func() { + // Waiting for the pod is created + pod := new(corev1.Pod) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod) + if err != nil { + return "", err + } + + return pod.Name, nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created") + + // Delete the AutoScalingListener + err := k8sClient.Delete(ctx, autoScalingListener) + Expect(err).NotTo(HaveOccurred(), "failed to delete test AutoScalingListener") + + // Cleanup the listener pod + Eventually( + func() error { + podList := new(corev1.PodList) + err := k8sClient.List(ctx, podList, client.InNamespace(autoScalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoScalingListener.Name}) + if err != 
nil { + return err + } + + if len(podList.Items) > 0 { + return fmt.Errorf("pod still exists") + } + + return nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete pod") + + // Cleanup the listener service account + Eventually( + func() error { + serviceAccountList := new(corev1.ServiceAccountList) + err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoScalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoScalingListener.Name}) + if err != nil { + return err + } + + if len(serviceAccountList.Items) > 0 { + return fmt.Errorf("service account still exists") + } + + return nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete service account") + + // The AutoScalingListener should be deleted + Eventually( + func() error { + listenerList := new(actionsv1alpha1.AutoscalingListenerList) + err := k8sClient.List(ctx, listenerList, client.InNamespace(autoScalingListener.Namespace), client.MatchingFields{".metadata.name": autoScalingListener.Name}) + if err != nil { + return err + } + + if len(listenerList.Items) > 0 { + return fmt.Errorf("AutoScalingListener still exists") + } + return nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete AutoScalingListener") + }) + }) + + Context("React to changes in the AutoScalingListener", func() { + It("It should update role to match EphemeralRunnerSet", func() { + // Waiting for the pod is created + pod := new(corev1.Pod) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod) + if err != nil { + return "", err + } + + return pod.Name, nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created") + + // 
Update the AutoScalingListener + updated := autoScalingListener.DeepCopy() + updated.Spec.EphemeralRunnerSetName = "test-ers-updated" + err := k8sClient.Patch(ctx, updated, client.MergeFrom(autoScalingListener)) + Expect(err).NotTo(HaveOccurred(), "failed to update test AutoScalingListener") + + // Check if role is updated with right rules + role := new(rbacv1.Role) + Eventually( + func() ([]rbacv1.PolicyRule, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoScalingListener), Namespace: autoScalingListener.Spec.AutoscalingRunnerSetNamespace}, role) + if err != nil { + return nil, err + } + + return role.Rules, nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated") + }) + + It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() { + // Waiting for the pod is created + pod := new(corev1.Pod) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod) + if err != nil { + return "", err + } + + return pod.Name, nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created") + + // Update the secret + updatedSecret := configSecret.DeepCopy() + updatedSecret.Data["github_token"] = []byte(autoScalingListenerTestGitHubToken + "_updated") + err := k8sClient.Update(ctx, updatedSecret) + Expect(err).NotTo(HaveOccurred(), "failed to update test secret") + + updatedPod := pod.DeepCopy() + updatedPod.Status.Phase = corev1.PodFailed + err = k8sClient.Status().Update(ctx, updatedPod) + Expect(err).NotTo(HaveOccurred(), "failed to update test pod to failed") + + // Check if mirror secret is updated with right data + mirrorSecret := new(corev1.Secret) + Eventually( + func() 
(map[string][]byte, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoScalingListener), Namespace: autoScalingListener.Namespace}, mirrorSecret) + if err != nil { + return nil, err + } + + return mirrorSecret.Data, nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(BeEquivalentTo(updatedSecret.Data), "Mirror secret should be updated") + + // Check if we re-created a new pod + Eventually( + func() error { + latestPod := new(corev1.Pod) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, latestPod) + if err != nil { + return err + } + if latestPod.UID == pod.UID { + return fmt.Errorf("Pod should be recreated") + } + + return nil + }, + autoScalingListenerTestTimeout, + autoScalingListenerTestInterval).Should(Succeed(), "Pod should be recreated") + }) + }) +}) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go new file mode 100644 index 0000000000..2be67c0ef8 --- /dev/null +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -0,0 +1,506 @@ +/* +Copyright 2020 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package actionsgithubcom + +import ( + "context" + "fmt" + "sort" + "strconv" + + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/github/actions" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + // TODO: Replace with shared image. + name = "autoscaler" + autoscalingRunnerSetOwnerKey = ".metadata.controller" + LabelKeyRunnerSpecHash = "runner-spec-hash" + LabelKeyAutoScaleRunnerSetName = "auto-scale-runner-set-name" + autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" + runnerScaleSetIdKey = "runner-scale-set-id" + + // scaleSetListenerLabel is the key of pod.meta.labels to label + // that the pod is a listener application + scaleSetListenerLabel = "runner-scale-set-listener" +) + +// AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object +type AutoscalingRunnerSetReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + ControllerNamespace string + DefaultRunnerScaleSetListenerImage string + DefaultRunnerScaleSetListenerImagePullSecrets []string + ActionsClient actions.MultiClient + + resourceBuilder resourceBuilder +} + +// +kubebuilder:rbac:groups=core,resources=namespaces;pods,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=namespaces/status;pods/status,verbs=get +// 
+kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets/finalizers,verbs=update +// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalinglisteners,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalinglisteners/status,verbs=get;update;patch + +// Reconcile a AutoscalingRunnerSet resource to meet its desired spec. +func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("autoscalingrunnerset", req.NamespacedName) + + autoscalingRunnerSet := new(v1alpha1.AutoscalingRunnerSet) + if err := r.Get(ctx, req.NamespacedName, autoscalingRunnerSet); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if !autoscalingRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() { + if controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) { + log.Info("Deleting resources") + done, err := r.cleanupListener(ctx, autoscalingRunnerSet, log) + if err != nil { + log.Error(err, "Failed to clean up listener") + return ctrl.Result{}, err + } + if !done { + // we are going to get notified anyway to proceed with rest of the + // cleanup. 
No need to re-queue + log.Info("Waiting for listener to be deleted") + return ctrl.Result{}, nil + } + + done, err = r.cleanupEphemeralRunnerSets(ctx, autoscalingRunnerSet, log) + if err != nil { + log.Error(err, "Failed to clean up ephemeral runner sets") + return ctrl.Result{}, err + } + if !done { + log.Info("Waiting for ephemeral runner sets to be deleted") + return ctrl.Result{}, nil + } + + log.Info("Removing finalizer") + err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { + controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName) + }) + if err != nil && !kerrors.IsNotFound(err) { + log.Error(err, "Failed to update autoscaling runner set without finalizer") + return ctrl.Result{}, err + } + + log.Info("Successfully removed finalizer after cleanup") + } + return ctrl.Result{}, nil + } + + if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) { + log.Info("Adding finalizer") + if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { + controllerutil.AddFinalizer(obj, autoscalingRunnerSetFinalizerName) + }); err != nil { + log.Error(err, "Failed to update autoscaling runner set with finalizer added") + return ctrl.Result{}, err + } + + log.Info("Successfully added finalizer") + return ctrl.Result{}, nil + } + + scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdKey] + if !ok { + // Need to create a new runner scale set on Actions service + log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.") + return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log) + } + + if id, err := strconv.Atoi(scaleSetIdRaw); err != nil || id <= 0 { + log.Info("Runner scale set id annotation is not an id, or is <= 0. Creating a new runner scale set.") + // something modified the scaleSetId. 
Try to create one + return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log) + } + + secret := new(corev1.Secret) + if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, secret); err != nil { + log.Error(err, "Failed to find GitHub config secret.", + "namespace", autoscalingRunnerSet.Namespace, + "name", autoscalingRunnerSet.Spec.GitHubConfigSecret) + return ctrl.Result{}, err + } + + existingRunnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet) + if err != nil { + log.Error(err, "Failed to list existing ephemeral runner sets") + return ctrl.Result{}, err + } + + latestRunnerSet := existingRunnerSets.latest() + if latestRunnerSet == nil { + log.Info("Latest runner set does not exist. Creating a new runner set.") + return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log) + } + + desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() + for _, runnerSet := range existingRunnerSets.all() { + log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[LabelKeyRunnerSpecHash]) + } + + if desiredSpecHash != latestRunnerSet.Labels[LabelKeyRunnerSpecHash] { + log.Info("Latest runner set spec hash does not match the current autoscaling runner set. 
Creating a new runner set ") + return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log) + } + + oldRunnerSets := existingRunnerSets.old() + if len(oldRunnerSets) > 0 { + log.Info("Cleanup old ephemeral runner sets", "count", len(oldRunnerSets)) + err := r.deleteEphemeralRunnerSets(ctx, oldRunnerSets, log) + if err != nil { + log.Error(err, "Failed to clean up old runner sets") + return ctrl.Result{}, err + } + } + + // Make sure the AutoscalingListener is up and running in the controller namespace + listener := new(v1alpha1.AutoscalingListener) + if err := r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, listener); err != nil { + if kerrors.IsNotFound(err) { + // We don't have a listener + log.Info("Creating a new AutoscalingListener for the runner set", "ephemeralRunnerSetName", latestRunnerSet.Name) + return r.createAutoScalingListenerForRunnerSet(ctx, autoscalingRunnerSet, latestRunnerSet, log) + } + log.Error(err, "Failed to get AutoscalingListener resource") + return ctrl.Result{}, err + } + + // Our listener pod is out of date, so we need to delete it to get a new recreate. + if listener.Labels[LabelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() { + log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name) + if err := r.Delete(ctx, listener); err != nil { + if kerrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + log.Error(err, "Failed to delete AutoscalingListener resource") + return ctrl.Result{}, err + } + + log.Info("Deleted RunnerScaleSetListener since existing one is out of date") + return ctrl.Result{}, nil + } + + // Update the status of autoscaling runner set. 
+ if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners { + if err := patch(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { + obj.Status.CurrentRunners = latestRunnerSet.Status.CurrentReplicas + }); err != nil { + log.Error(err, "Failed to update autoscaling runner set status with current runner count") + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + +func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) { + logger.Info("Cleaning up the listener") + var listener v1alpha1.AutoscalingListener + err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener) + switch { + case err == nil: + if listener.ObjectMeta.DeletionTimestamp.IsZero() { + logger.Info("Deleting the listener") + if err := r.Delete(ctx, &listener); err != nil { + return false, fmt.Errorf("failed to delete listener: %v", err) + } + } + return false, nil + case err != nil && !kerrors.IsNotFound(err): + return false, fmt.Errorf("failed to get listener: %v", err) + } + + logger.Info("Listener is deleted") + return true, nil +} + +func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) { + logger.Info("Cleaning up ephemeral runner sets") + runnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet) + if err != nil { + return false, fmt.Errorf("failed to list ephemeral runner sets: %v", err) + } + if runnerSets.empty() { + logger.Info("All ephemeral runner sets are deleted") + return true, nil + } + + logger.Info("Deleting all ephemeral runner sets", "count", runnerSets.count()) + if err := r.deleteEphemeralRunnerSets(ctx, runnerSets.all(), logger); err != nil { + return false, fmt.Errorf("failed 
to delete ephemeral runner sets: %v", err) + } + return false, nil +} + +func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.Context, oldRunnerSets []v1alpha1.EphemeralRunnerSet, logger logr.Logger) error { + for i := range oldRunnerSets { + rs := &oldRunnerSets[i] + // already deleted but contains finalizer so it still exists + if !rs.ObjectMeta.DeletionTimestamp.IsZero() { + logger.Info("Skip ephemeral runner set since it is already marked for deletion", "name", rs.Name) + continue + } + logger.Info("Deleting ephemeral runner set", "name", rs.Name) + if err := r.Delete(ctx, rs); err != nil { + return fmt.Errorf("failed to delete EphemeralRunnerSet resource: %v", err) + } + logger.Info("Deleted ephemeral runner set", "name", rs.Name) + } + return nil +} + +func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { + logger.Info("Creating a new runner scale set") + actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) + if err != nil { + logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set") + return ctrl.Result{}, err + } + runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, autoscalingRunnerSet.Name) + if err != nil { + logger.Error(err, "Failed to get runner scale set from Actions service") + return ctrl.Result{}, err + } + + if runnerScaleSet == nil { + runnerGroupId := 1 + if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 { + runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup) + if err != nil { + logger.Error(err, "Failed to get runner group by name", "runnerGroup", autoscalingRunnerSet.Spec.RunnerGroup) + return ctrl.Result{}, err + } + + runnerGroupId = int(runnerGroup.ID) + } + + runnerScaleSet, err = actionsClient.CreateRunnerScaleSet( + ctx, + &actions.RunnerScaleSet{ + Name: 
autoscalingRunnerSet.Name, + RunnerGroupId: runnerGroupId, + Labels: []actions.Label{ + { + Name: autoscalingRunnerSet.Name, + Type: "System", + }, + }, + RunnerSetting: actions.RunnerSetting{ + Ephemeral: true, + DisableUpdate: true, + }, + }) + if err != nil { + logger.Error(err, "Failed to create a new runner scale set on Actions service") + return ctrl.Result{}, err + } + } + + logger.Info("Created/Reused a runner scale set", "id", runnerScaleSet.Id) + if autoscalingRunnerSet.Annotations == nil { + autoscalingRunnerSet.Annotations = map[string]string{} + } + + autoscalingRunnerSet.Annotations[runnerScaleSetIdKey] = strconv.Itoa(runnerScaleSet.Id) + logger.Info("Adding runner scale set ID as an annotation") + if err := r.Update(ctx, autoscalingRunnerSet); err != nil { + logger.Error(err, "Failed to add runner scale set ID") + return ctrl.Result{}, err + } + + logger.Info("Updated with runner scale set ID as an annotation") + return ctrl.Result{}, nil +} + +func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) { + desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet) + if err != nil { + log.Error(err, "Could not create EphemeralRunnerSet") + return ctrl.Result{}, err + } + + if err := ctrl.SetControllerReference(autoscalingRunnerSet, desiredRunnerSet, r.Scheme); err != nil { + log.Error(err, "Failed to set controller reference to a new EphemeralRunnerSet") + return ctrl.Result{}, err + } + + log.Info("Creating a new EphemeralRunnerSet resource", "name", desiredRunnerSet.Name) + if err := r.Create(ctx, desiredRunnerSet); err != nil { + log.Error(err, "Failed to create EphemeralRunnerSet resource") + return ctrl.Result{}, err + } + + log.Info("Created a new EphemeralRunnerSet resource", "name", desiredRunnerSet.Name) + return ctrl.Result{}, nil +} + +func (r *AutoscalingRunnerSetReconciler) 
createAutoScalingListenerForRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (ctrl.Result, error) { + var imagePullSecrets []corev1.LocalObjectReference + for _, imagePullSecret := range r.DefaultRunnerScaleSetListenerImagePullSecrets { + imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{ + Name: imagePullSecret, + }) + } + + autoscalingListener, err := r.resourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets) + if err != nil { + log.Error(err, "Could not create AutoscalingListener spec") + return ctrl.Result{}, err + } + + log.Info("Creating a new AutoscalingListener resource", "name", autoscalingListener.Name, "namespace", autoscalingListener.Namespace) + if err := r.Create(ctx, autoscalingListener); err != nil { + log.Error(err, "Failed to create AutoscalingListener resource") + return ctrl.Result{}, err + } + + log.Info("Created a new AutoscalingListener resource", "name", autoscalingListener.Name, "namespace", autoscalingListener.Namespace) + return ctrl.Result{}, nil +} + +func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) { + list := new(v1alpha1.EphemeralRunnerSetList) + if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingRunnerSet.Name}); err != nil { + return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err) + } + + return &EphemeralRunnerSets{list: list}, nil +} + +func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) { + var configSecret corev1.Secret + if err := r.Get(ctx, 
types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, &configSecret); err != nil { + return nil, fmt.Errorf("failed to find GitHub config secret: %w", err) + } + + return r.ActionsClient.GetClientFromSecret(ctx, autoscalingRunnerSet.Spec.GitHubConfigUrl, autoscalingRunnerSet.Namespace, configSecret.Data) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { + groupVersionIndexer := func(rawObj client.Object) []string { + groupVersion := v1alpha1.GroupVersion.String() + owner := metav1.GetControllerOf(rawObj) + if owner == nil { + return nil + } + + // ...make sure it is owned by this controller + if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingRunnerSet" { + return nil + } + + // ...and if so, return it + return []string{owner.Name} + } + + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, autoscalingRunnerSetOwnerKey, groupVersionIndexer); err != nil { + return err + } + + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.AutoscalingRunnerSet{}). + Owns(&v1alpha1.EphemeralRunnerSet{}). + Watches(&source.Kind{Type: &v1alpha1.AutoscalingListener{}}, handler.EnqueueRequestsFromMapFunc( + func(o client.Object) []reconcile.Request { + autoscalingListener := o.(*v1alpha1.AutoscalingListener) + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + Name: autoscalingListener.Spec.AutoscalingRunnerSetName, + }, + }, + } + }, + )). + WithEventFilter(predicate.ResourceVersionChangedPredicate{}). 
+ Complete(r) +} + +// NOTE: if this is logic should be used for other resources, +// consider using generics +type EphemeralRunnerSets struct { + list *v1alpha1.EphemeralRunnerSetList + sorted bool +} + +func (rs *EphemeralRunnerSets) latest() *v1alpha1.EphemeralRunnerSet { + if rs.empty() { + return nil + } + if !rs.sorted { + rs.sort() + } + return rs.list.Items[0].DeepCopy() +} + +func (rs *EphemeralRunnerSets) old() []v1alpha1.EphemeralRunnerSet { + if rs.empty() { + return nil + } + if !rs.sorted { + rs.sort() + } + copy := rs.list.DeepCopy() + return copy.Items[1:] +} + +func (rs *EphemeralRunnerSets) all() []v1alpha1.EphemeralRunnerSet { + if rs.empty() { + return nil + } + copy := rs.list.DeepCopy() + return copy.Items +} + +func (rs *EphemeralRunnerSets) empty() bool { + return rs.list == nil || len(rs.list.Items) == 0 +} + +func (rs *EphemeralRunnerSets) sort() { + sort.Slice(rs.list.Items, func(i, j int) bool { + return rs.list.Items[i].GetCreationTimestamp().After(rs.list.Items[j].GetCreationTimestamp().Time) + }) +} + +func (rs *EphemeralRunnerSets) count() int { + return len(rs.list.Items) +} diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go new file mode 100644 index 0000000000..fd1e5c6f6e --- /dev/null +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -0,0 +1,367 @@ +package actionsgithubcom + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/github/actions/fake" +) + +const ( + autoScalingRunnerSetTestTimeout = time.Second * 5 + autoScalingRunnerSetTestInterval = time.Millisecond * 250 + autoScalingRunnerSetTestGitHubToken = "gh_token" +) + +var _ = Describe("Test AutoScalingRunnerSet controller", func() { + var ctx context.Context + var cancel context.CancelFunc + autoScalingNS := new(corev1.Namespace) + autoScalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet) + configSecret := new(corev1.Secret) + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.TODO()) + autoScalingNS = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)}, + } + + err := k8sClient.Create(ctx, autoScalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") + + configSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: autoScalingNS.Name, + }, + Data: map[string][]byte{ + "github_token": []byte(autoScalingRunnerSetTestGitHubToken), + }, + } + + err = k8sClient.Create(ctx, configSecret) + Expect(err).NotTo(HaveOccurred(), "failed to create config secret") + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Namespace: autoScalingNS.Name, + MetricsBindAddress: "0", + }) + Expect(err).NotTo(HaveOccurred(), "failed to create manager") + + controller := &AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ControllerNamespace: autoScalingNS.Name, + DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", + ActionsClient: fake.NewMultiClient(), + } + err = controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + min := 1 
+ max := 10 + autoScalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoScalingNS.Name, + }, + Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + MaxRunners: &max, + MinRunners: &min, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err = k8sClient.Create(ctx, autoScalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + go func() { + defer GinkgoRecover() + + err := mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred(), "failed to start manager") + }() + }) + + AfterEach(func() { + defer cancel() + + err := k8sClient.Delete(ctx, autoScalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") + }) + + Context("When creating a new AutoScalingRunnerSet", func() { + It("It should create/add all required resources for a new AutoScalingRunnerSet (finalizer, runnerscaleset, ephemeralrunnerset, listener)", func() { + // Check if finalizer is added + created := new(actionsv1alpha1.AutoscalingRunnerSet) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, created) + if err != nil { + return "", err + } + if len(created.Finalizers) == 0 { + return "", nil + } + return created.Finalizers[0], nil + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer") + + // Check if runner scale set is created on service + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, 
created) + if err != nil { + return "", err + } + + if _, ok := created.Annotations[runnerScaleSetIdKey]; !ok { + return "", nil + } + + return created.Annotations[runnerScaleSetIdKey], nil + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(BeEquivalentTo("1"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation") + + // Check if ephemeral runner set is created + Eventually( + func() (int, error) { + runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + if err != nil { + return 0, err + } + + return len(runnerSetList.Items), nil + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created") + + // Check if listener is created + Eventually( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") + + // Check if status is updated + runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") + Expect(len(runnerSetList.Items)).To(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created") + runnerSet := runnerSetList.Items[0] + statusUpdate := runnerSet.DeepCopy() + statusUpdate.Status.CurrentReplicas = 100 + err = k8sClient.Status().Patch(ctx, statusUpdate, client.MergeFrom(&runnerSet)) + Expect(err).NotTo(HaveOccurred(), "failed to patch EphemeralRunnerSet status") + + Eventually( + func() (int, error) { + updated := new(actionsv1alpha1.AutoscalingRunnerSet) + err := 
k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, updated) + if err != nil { + return 0, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err) + } + return updated.Status.CurrentRunners, nil + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(100), "AutoScalingRunnerSet status should be updated") + }) + }) + + Context("When deleting a new AutoScalingRunnerSet", func() { + It("It should cleanup all resources for a deleting AutoScalingRunnerSet before removing it", func() { + // Wait till the listener is created + Eventually( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") + + // Delete the AutoScalingRunnerSet + err := k8sClient.Delete(ctx, autoScalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to delete AutoScalingRunnerSet") + + // Check if the listener is deleted + Eventually( + func() error { + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + if err != nil && errors.IsNotFound(err) { + return nil + } + + return fmt.Errorf("listener is not deleted") + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be deleted") + + // Check if all the EphemeralRunnerSet is deleted + Eventually( + func() error { + runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + if err != nil { + return err + } + + if len(runnerSetList.Items) != 0 { + return fmt.Errorf("EphemeralRunnerSet is not deleted, 
count=%v", len(runnerSetList.Items)) + } + + return nil + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(Succeed(), "All EphemeralRunnerSet should be deleted") + + // Check if the AutoScalingRunnerSet is deleted + Eventually( + func() error { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingRunnerSet)) + if err != nil && errors.IsNotFound(err) { + return nil + } + + return fmt.Errorf("AutoScalingRunnerSet is not deleted") + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(Succeed(), "AutoScalingRunnerSet should be deleted") + }) + }) + + Context("When updating a new AutoScalingRunnerSet", func() { + It("It should re-create EphemeralRunnerSet and Listener as needed when updating AutoScalingRunnerSet", func() { + // Wait till the listener is created + listener := new(actionsv1alpha1.AutoscalingListener) + Eventually( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener) + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") + + runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") + Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet") + runnerSet := runnerSetList.Items[0] + + // Update the AutoScalingRunnerSet.Spec.Template + // This should trigger re-creation of EphemeralRunnerSet and Listener + patched := autoScalingRunnerSet.DeepCopy() + patched.Spec.Template.Spec.PriorityClassName = "test-priority-class" + err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoScalingRunnerSet)) + 
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") + autoScalingRunnerSet = patched.DeepCopy() + + // We should create a new EphemeralRunnerSet and delete the old one, eventually, we will have only one EphemeralRunnerSet + Eventually( + func() (string, error) { + runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + if err != nil { + return "", err + } + + if len(runnerSetList.Items) != 1 { + return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items)) + } + + return runnerSetList.Items[0].Labels[LabelKeyRunnerSpecHash], nil + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created") + + // We should create a new listener + Eventually( + func() (string, error) { + listener := new(actionsv1alpha1.AutoscalingListener) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener) + if err != nil { + return "", err + } + + return listener.Spec.EphemeralRunnerSetName, nil + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Name), "New Listener should be created") + + // Only update the Spec for the AutoScalingListener + // This should trigger re-creation of the Listener only + runnerSetList = new(actionsv1alpha1.EphemeralRunnerSetList) + err = k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") + Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet") + runnerSet = runnerSetList.Items[0] + + listener = new(actionsv1alpha1.AutoscalingListener) + err = k8sClient.Get(ctx, client.ObjectKey{Name: 
scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener) + Expect(err).NotTo(HaveOccurred(), "failed to get Listener") + + patched = autoScalingRunnerSet.DeepCopy() + min := 10 + patched.Spec.MinRunners = &min + err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoScalingRunnerSet)) + Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") + + // We should not re-create a new EphemeralRunnerSet + Consistently( + func() (string, error) { + runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + if err != nil { + return "", err + } + + if len(runnerSetList.Items) != 1 { + return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items)) + } + + return string(runnerSetList.Items[0].UID), nil + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(string(runnerSet.UID)), "New EphemeralRunnerSet should not be created") + + // We should only re-create a new listener + Eventually( + func() (string, error) { + listener := new(actionsv1alpha1.AutoscalingListener) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener) + if err != nil { + return "", err + } + + return string(listener.UID), nil + }, + autoScalingRunnerSetTestTimeout, + autoScalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(string(listener.UID)), "New Listener should be created") + }) + }) +}) diff --git a/controllers/actions.github.com/clientutil.go b/controllers/actions.github.com/clientutil.go new file mode 100644 index 0000000000..e3dfbebb4f --- /dev/null +++ b/controllers/actions.github.com/clientutil.go @@ -0,0 +1,22 @@ +package actionsgithubcom + +import ( + "context" + + kclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +type object[T kclient.Object] 
interface { + kclient.Object + DeepCopy() T +} + +type patcher interface { + Patch(ctx context.Context, obj kclient.Object, patch kclient.Patch, opts ...kclient.PatchOption) error +} + +func patch[T object[T]](ctx context.Context, client patcher, obj T, update func(obj T)) error { + original := obj.DeepCopy() + update(obj) + return client.Patch(ctx, obj, kclient.MergeFrom(original)) +} diff --git a/controllers/actions.github.com/constants.go b/controllers/actions.github.com/constants.go new file mode 100644 index 0000000000..0ff80d53d0 --- /dev/null +++ b/controllers/actions.github.com/constants.go @@ -0,0 +1,10 @@ +package actionsgithubcom + +const ( + LabelKeyRunnerTemplateHash = "runner-template-hash" + LabelKeyPodTemplateHash = "pod-template-hash" +) + +const ( + EnvVarRunnerJITConfig = "ACTIONS_RUNNER_INPUT_JITCONFIG" +) diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go new file mode 100644 index 0000000000..3ef0306cbd --- /dev/null +++ b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -0,0 +1,645 @@ +/* +Copyright 2020 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package actionsgithubcom + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/github/actions" + "github.com/go-logr/logr" + "go.uber.org/multierr" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +const ( + // EphemeralRunnerContainerName is the name of the runner container. + // It represents the name of the container running the self-hosted runner image. + EphemeralRunnerContainerName = "runner" + + ephemeralRunnerFinalizerName = "ephemeralrunner.actions.github.com/finalizer" +) + +// EphemeralRunnerReconciler reconciles a EphemeralRunner object +type EphemeralRunnerReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + ActionsClient actions.MultiClient + resourceBuilder resourceBuilder +} + +// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/finalizers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;delete +// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch +// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=create;delete;get +// 
+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;delete;get +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;delete;get + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.6.4/pkg/reconcile +func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("ephemeralrunner", req.NamespacedName) + + ephemeralRunner := new(v1alpha1.EphemeralRunner) + if err := r.Get(ctx, req.NamespacedName, ephemeralRunner); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if !ephemeralRunner.ObjectMeta.DeletionTimestamp.IsZero() { + if controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) { + log.Info("Finalizing ephemeral runner") + done, err := r.cleanupResources(ctx, ephemeralRunner, log) + if err != nil { + log.Error(err, "Failed to clean up ephemeral runner owned resources") + return ctrl.Result{}, err + } + if !done { + log.Info("Waiting for ephemeral runner owned resources to be deleted") + return ctrl.Result{}, nil + } + + done, err = r.cleanupContainerHooksResources(ctx, ephemeralRunner, log) + if err != nil { + log.Error(err, "Failed to clean up container hooks resources") + return ctrl.Result{}, err + } + if !done { + log.Info("Waiting for container hooks resources to be deleted") + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } + + log.Info("Removing finalizer") + err = patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + controllerutil.RemoveFinalizer(obj, ephemeralRunnerFinalizerName) + }) + if err != nil && !kerrors.IsNotFound(err) { + log.Error(err, "Failed to update ephemeral runner without the finalizer") + return 
ctrl.Result{}, err + } + + log.Info("Successfully removed finalizer after cleanup") + return ctrl.Result{}, nil + } + return ctrl.Result{}, nil + } + + if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) { + log.Info("Adding finalizer") + if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + controllerutil.AddFinalizer(obj, ephemeralRunnerFinalizerName) + }); err != nil { + log.Error(err, "Failed to update with finalizer set") + return ctrl.Result{}, err + } + + log.Info("Successfully added finalizer") + return ctrl.Result{}, nil + } + + if ephemeralRunner.Status.Phase == corev1.PodSucceeded || ephemeralRunner.Status.Phase == corev1.PodFailed { + // Stop reconciling on this object. + // The EphemeralRunnerSet is responsible for cleaning it up. + log.Info("EphemeralRunner has already finished. Stopping reconciliation and waiting for EphemeralRunnerSet to clean it up", "phase", ephemeralRunner.Status.Phase) + return ctrl.Result{}, nil + } + + if ephemeralRunner.Status.RunnerId == 0 { + log.Info("Creating new ephemeral runner registration and updating status with runner config") + return r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log) + } + + secret := new(corev1.Secret) + if err := r.Get(ctx, req.NamespacedName, secret); err != nil { + if !kerrors.IsNotFound(err) { + log.Error(err, "Failed to fetch secret") + return ctrl.Result{}, err + } + // create secret if not created + log.Info("Creating new ephemeral runner secret for jitconfig.") + return r.createSecret(ctx, ephemeralRunner, log) + } + + pod := new(corev1.Pod) + if err := r.Get(ctx, req.NamespacedName, pod); err != nil { + switch { + case !kerrors.IsNotFound(err): + log.Error(err, "Failed to fetch the pod") + return ctrl.Result{}, err + + case len(ephemeralRunner.Status.Failures) > 5: + log.Info("EphemeralRunner has failed more than 5 times. 
Marking it as failed") + if err := r.markAsFailed(ctx, ephemeralRunner, log); err != nil { + log.Error(err, "Failed to set ephemeral runner to phase Failed") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + + default: + // Pod was not found. Create if the pod has never been created + log.Info("Creating new EphemeralRunner pod.") + return r.createPod(ctx, ephemeralRunner, secret, log) + } + } + + cs := runnerContainerStatus(pod) + switch { + case cs == nil: + // starting, no container state yet + log.Info("Waiting for runner container status to be available") + return ctrl.Result{}, nil + case cs.State.Terminated == nil: // still running or evicted + if pod.Status.Phase == corev1.PodFailed && pod.Status.Reason == "Evicted" { + log.Info("Pod set the termination phase, but container state is not terminated. Deleting pod", + "PodPhase", pod.Status.Phase, + "PodReason", pod.Status.Reason, + "PodMessage", pod.Status.Message, + ) + + if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil { + log.Error(err, "failed to delete pod as failed on pod.Status.Phase: Failed") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + + log.Info("Ephemeral runner container is still running") + if err := r.updateRunStatusFromPod(ctx, ephemeralRunner, pod, log); err != nil { + log.Info("Failed to update ephemeral runner status. Requeue to not miss this event") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + + case cs.State.Terminated.ExitCode != 0: // failed + log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode) + if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil { + log.Error(err, "Failed to delete runner pod on failure") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + + default: + // pod succeeded. We double-check with the service if the runner exists. + // The reason is that image can potentially finish with status 0, but not pick up the job. 
+ existsInService, err := r.runnerRegisteredWithService(ctx, ephemeralRunner.DeepCopy(), log) + if err != nil { + log.Error(err, "Failed to check if runner is registered with the service") + return ctrl.Result{}, err + } + if !existsInService { + // the runner does not exist in the service, so it must be done + log.Info("Ephemeral runner has finished since it does not exist in the service anymore") + if err := r.markAsFinished(ctx, ephemeralRunner, log); err != nil { + log.Error(err, "Failed to mark ephemeral runner as finished") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + + // The runner still exists. This can happen if the pod exited with 0 but fails to start + log.Info("Ephemeral runner pod has finished, but the runner still exists in the service. Deleting the pod to restart it.") + if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil { + log.Error(err, "failed to delete a pod that still exists in the service") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } +} + +func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (deleted bool, err error) { + log.Info("Cleaning up the runner pod") + pod := new(corev1.Pod) + err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod) + switch { + case err == nil: + if pod.ObjectMeta.DeletionTimestamp.IsZero() { + log.Info("Deleting the runner pod") + if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) { + return false, fmt.Errorf("failed to delete pod: %v", err) + } + } + return false, nil + case err != nil && !kerrors.IsNotFound(err): + return false, err + } + log.Info("Pod is deleted") + + log.Info("Cleaning up the runner jitconfig secret") + secret := new(corev1.Secret) + err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, secret) + switch { + case err == nil: + if 
secret.ObjectMeta.DeletionTimestamp.IsZero() { + log.Info("Deleting the jitconfig secret") + if err := r.Delete(ctx, secret); err != nil && !kerrors.IsNotFound(err) { + return false, fmt.Errorf("failed to delete secret: %v", err) + } + } + return false, nil + case err != nil && !kerrors.IsNotFound(err): + return false, err + } + log.Info("Secret is deleted") + + return true, nil +} + +func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) { + log.Info("Cleaning up runner linked pods") + done, err = r.cleanupRunnerLinkedPods(ctx, ephemeralRunner, log) + if err != nil { + return false, fmt.Errorf("failed to clean up runner linked pods: %v", err) + } + + if !done { + return false, nil + } + + log.Info("Cleaning up runner linked secrets") + done, err = r.cleanupRunnerLinkedSecrets(ctx, ephemeralRunner, log) + if err != nil { + return false, err + } + + return done, nil +} + +func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) { + runnerLinedLabels := client.MatchingLabels( + map[string]string{ + "runner-pod": ephemeralRunner.Name, + }, + ) + var runnerLinkedPodList corev1.PodList + err = r.List(ctx, &runnerLinkedPodList, client.InNamespace(ephemeralRunner.Namespace), runnerLinedLabels) + if err != nil { + return false, fmt.Errorf("failed to list runner-linked pods: %v", err) + } + + if len(runnerLinkedPodList.Items) == 0 { + return true, nil + } + + log.Info("Deleting container hooks runner-linked pods", "count", len(runnerLinkedPodList.Items)) + + var errs []error + for i := range runnerLinkedPodList.Items { + linkedPod := &runnerLinkedPodList.Items[i] + if !linkedPod.ObjectMeta.DeletionTimestamp.IsZero() { + continue + } + + log.Info("Deleting container hooks runner-linked pod", "name", linkedPod.Name) + if err := r.Delete(ctx, linkedPod); err 
!= nil && !kerrors.IsNotFound(err) { + errs = append(errs, fmt.Errorf("failed to delete runner linked pod %q: %v", linkedPod.Name, err)) + } + } + + return false, multierr.Combine(errs...) +} + +func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) { + runnerLinkedLabels := client.MatchingLabels( + map[string]string{ + "runner-pod": ephemeralRunner.ObjectMeta.Name, + }, + ) + var runnerLinkedSecretList corev1.SecretList + err = r.List(ctx, &runnerLinkedSecretList, client.InNamespace(ephemeralRunner.Namespace), runnerLinkedLabels) + if err != nil { + return false, fmt.Errorf("failed to list runner-linked secrets: %w", err) + } + + if len(runnerLinkedSecretList.Items) == 0 { + return true, nil + } + + log.Info("Deleting container hooks runner-linked secrets", "count", len(runnerLinkedSecretList.Items)) + + var errs []error + for i := range runnerLinkedSecretList.Items { + s := &runnerLinkedSecretList.Items[i] + if !s.ObjectMeta.DeletionTimestamp.IsZero() { + continue + } + + log.Info("Deleting container hooks runner-linked secret", "name", s.Name) + if err := r.Delete(ctx, s); err != nil && !kerrors.IsNotFound(err) { + errs = append(errs, fmt.Errorf("failed to delete runner linked secret %q: %v", s.Name, err)) + } + } + + return false, multierr.Combine(errs...) 
+} + +func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error { + log.Info("Updating ephemeral runner status to Failed") + if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + obj.Status.Phase = corev1.PodFailed + obj.Status.Reason = "TooManyPodFailures" + obj.Status.Message = "Pod has failed to start more than 5 times" + }); err != nil { + return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %v", err) + } + + log.Info("Removing the runner from the service") + if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil { + return fmt.Errorf("failed to remove the runner from service: %v", err) + } + + log.Info("EphemeralRunner is marked as Failed and deleted from the service") + return nil +} + +func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error { + log.Info("Updating ephemeral runner status to Finished") + if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + obj.Status.Phase = corev1.PodSucceeded + }); err != nil { + return fmt.Errorf("failed to update ephemeral runner with status finished: %v", err) + } + + log.Info("EphemeralRunner status is marked as Finished") + return nil +} + +// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count. +// It should not be responsible for setting the status to Failed. 
+func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error { + if pod.ObjectMeta.DeletionTimestamp.IsZero() { + log.Info("Deleting the ephemeral runner pod", "podId", pod.UID) + if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) { + return fmt.Errorf("failed to delete pod with status failed: %v", err) + } + } + + log.Info("Updating ephemeral runner status to track the failure count") + if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + if obj.Status.Failures == nil { + obj.Status.Failures = make(map[string]bool) + } + obj.Status.Failures[string(pod.UID)] = true + obj.Status.Ready = false + obj.Status.Reason = pod.Status.Reason + obj.Status.Message = pod.Status.Message + }); err != nil { + return fmt.Errorf("failed to update ephemeral runner status: failed attempts: %v", err) + } + + log.Info("EphemeralRunner pod is deleted and status is updated with failure count") + return nil +} + +// updateStatusWithRunnerConfig fetches runtime configuration needed by the runner +// This method should always set .status.runnerId and .status.runnerJITConfig +func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) { + // Runner is not registered with the service. 
We need to register it first + log.Info("Creating ephemeral runner JIT config") + actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err) + } + + jitSettings := &actions.RunnerScaleSetJitRunnerSetting{ + Name: ephemeralRunner.Name, + } + jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId) + if err != nil { + actionsError := &actions.ActionsError{} + if !errors.As(err, &actionsError) { + return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err) + } + + if actionsError.StatusCode != http.StatusConflict || + !strings.Contains(actionsError.ExceptionName, "AgentExistsException") { + return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err) + } + + // If the runner with the name we want already exists it means: + // - We might have a name collision. + // - Our previous reconciliation loop failed to update the + // status with the runnerId and runnerJITConfig after the `GenerateJitRunnerConfig` + // created the runner registration on the service. + // We will try to get the runner and see if it's belong to this AutoScalingRunnerSet, + // if so, we can simply delete the runner registration and create a new one. 
+ log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name) + existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err) + } + + if existingRunner == nil { + log.Info("Runner with the same name does not exist, re-queuing the reconciliation") + return ctrl.Result{Requeue: true}, nil + } + + log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId) + if existingRunner.RunnerScaleSetId == ephemeralRunner.Spec.RunnerScaleSetId { + log.Info("Removing the runner with the same name") + err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id)) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err) + } + + log.Info("Removed the runner with the same name, re-queuing the reconciliation") + return ctrl.Result{Requeue: true}, nil + } + + // TODO: Do we want to mark the ephemeral runner as failed, and let EphemeralRunnerSet to clean it up, so we can recover from this situation? + // The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take the control back. 
+ return ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %v", err) + } + log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id) + + log.Info("Updating ephemeral runner status with runnerId and runnerJITConfig") + err = patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + obj.Status.RunnerId = jitConfig.Runner.Id + obj.Status.RunnerName = jitConfig.Runner.Name + obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig + }) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err) + } + + log.Info("Updated ephemeral runner status with runnerId and runnerJITConfig") + return ctrl.Result{}, nil +} + +func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) { + log.Info("Creating new pod for ephemeral runner") + newPod := r.resourceBuilder.newEphemeralRunnerPod(ctx, runner, secret) + + if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil { + log.Error(err, "Failed to set controller reference to a new pod") + return ctrl.Result{}, err + } + + log.Info("Created new pod spec for ephemeral runner") + if err := r.Create(ctx, newPod); err != nil { + log.Error(err, "Failed to create pod resource for ephemeral runner.") + return ctrl.Result{}, err + } + + log.Info("Created ephemeral runner pod", + "runnerScaleSetId", runner.Spec.RunnerScaleSetId, + "runnerName", runner.Status.RunnerName, + "runnerId", runner.Status.RunnerId, + "configUrl", runner.Spec.GitHubConfigUrl, + "podName", newPod.Name) + + return ctrl.Result{}, nil +} + +func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) { + log.Info("Creating new secret for ephemeral runner") + jitSecret := 
r.resourceBuilder.newEphemeralRunnerJitSecret(runner) + + if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err) + } + + log.Info("Created new secret spec for ephemeral runner") + if err := r.Create(ctx, jitSecret); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err) + } + + log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name) + return ctrl.Result{}, nil +} + +// updateRunStatusFromPod is responsible for updating non-exiting statuses. +// It should never update phase to Failed or Succeeded +// +// The event should not be re-queued since the termination status should be set +// before proceeding with reconciliation logic +func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error { + if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed { + return nil + } + if ephemeralRunner.Status.Phase == pod.Status.Phase { + return nil + } + + log.Info("Updating ephemeral runner status with pod phase", "phase", pod.Status.Phase, "reason", pod.Status.Reason, "message", pod.Status.Message) + err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + obj.Status.Phase = pod.Status.Phase + obj.Status.Ready = obj.Status.Ready || (pod.Status.Phase == corev1.PodRunning) + obj.Status.Reason = pod.Status.Reason + obj.Status.Message = pod.Status.Message + }) + if err != nil { + return fmt.Errorf("failed to update runner status for Phase/Reason/Message: %v", err) + } + + log.Info("Updated ephemeral runner status with pod phase") + return nil +} + +func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) (actions.ActionsService, error) { + secret := new(corev1.Secret) + if err := r.Get(ctx, types.NamespacedName{Namespace: 
runner.Namespace, Name: runner.Spec.GitHubConfigSecret}, secret); err != nil { + return nil, fmt.Errorf("failed to get secret: %w", err) + } + + return r.ActionsClient.GetClientFromSecret(ctx, runner.Spec.GitHubConfigUrl, runner.Namespace, secret.Data) +} + +// runnerRegisteredWithService checks if the runner is still registered with the service +// Returns found=false and err=nil if ephemeral runner does not exist in GitHub service and should be deleted +func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) { + actionsClient, err := r.actionsClientFor(ctx, runner) + if err != nil { + return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err) + } + + log.Info("Checking if runner exists in GitHub service", "runnerId", runner.Status.RunnerId) + _, err = actionsClient.GetRunner(ctx, int64(runner.Status.RunnerId)) + if err != nil { + actionsError := &actions.ActionsError{} + if !errors.As(err, &actionsError) { + return false, err + } + + if actionsError.StatusCode != http.StatusNotFound || + !strings.Contains(actionsError.ExceptionName, "AgentNotFoundException") { + return false, fmt.Errorf("failed to check if runner exists in GitHub service: %v", err) + } + + log.Info("Runner does not exist in GitHub service", "runnerId", runner.Status.RunnerId) + return false, nil + } + + log.Info("Runner exists in GitHub service", "runnerId", runner.Status.RunnerId) + return true, nil +} + +func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error { + client, err := r.actionsClientFor(ctx, ephemeralRunner) + if err != nil { + return fmt.Errorf("failed to get actions client for runner: %v", err) + } + + log.Info("Removing runner from the service", "runnerId", ephemeralRunner.Status.RunnerId) + err = client.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)) + if err 
!= nil { + return fmt.Errorf("failed to remove runner from the service: %v", err) + } + + log.Info("Removed runner from the service", "runnerId", ephemeralRunner.Status.RunnerId) + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error { + // TODO(nikola-jokic): Add indexing and filtering fields on corev1.Pod{} + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.EphemeralRunner{}). + Owns(&corev1.Pod{}). + Owns(&corev1.Secret{}). + WithEventFilter(predicate.ResourceVersionChangedPredicate{}). + Named("ephemeral-runner-controller"). + Complete(r) +} + +func runnerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus { + for i := range pod.Status.ContainerStatuses { + cs := &pod.Status.ContainerStatuses[i] + if cs.Name == EphemeralRunnerContainerName { + return cs + } + } + return nil +} diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go new file mode 100644 index 0000000000..42f808094e --- /dev/null +++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go @@ -0,0 +1,769 @@ +package actionsgithubcom + +import ( + "context" + "net/http" + "time" + + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/github/actions" + + "github.com/actions/actions-runner-controller/github/actions/fake" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +const ( + gh_token = "gh_token" + timeout = time.Second * 30 + interval = time.Millisecond * 250 + runnerImage = "ghcr.io/actions/actions-runner:latest" +) + +func newExampleRunner(name, namespace, configSecretName string) *v1alpha1.EphemeralRunner { + return &v1alpha1.EphemeralRunner{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: v1alpha1.EphemeralRunnerSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecretName, + RunnerScaleSetId: 1, + PodTemplateSpec: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: EphemeralRunnerContainerName, + Image: runnerImage, + Command: []string{"/runner/run.sh"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "runner", + MountPath: "/runner", + }, + }, + }, + }, + InitContainers: []corev1.Container{ + { + Name: "setup", + Image: runnerImage, + Command: []string{"sh", "-c", "cp -r /actions-runner/* /runner/"}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "runner", + MountPath: "/runner", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "runner", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + }, + } + +} + +var _ = Describe("EphemeralRunner", func() { + + Describe("Resource manipulation", func() { + var ctx context.Context + var cancel context.CancelFunc + + autoScalingNS := new(corev1.Namespace) + configSecret := new(corev1.Secret) + + controller := new(EphemeralRunnerReconciler) + ephemeralRunner := new(v1alpha1.EphemeralRunner) + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) 
+ autoScalingNS = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testns-autoscaling-runner" + RandStringRunes(5), + }, + } + err := k8sClient.Create(ctx, autoScalingNS) + Expect(err).To(BeNil(), "failed to create test namespace for EphemeralRunner") + + configSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: autoScalingNS.Name, + }, + Data: map[string][]byte{ + "github_token": []byte(gh_token), + }, + } + + err = k8sClient.Create(ctx, configSecret) + Expect(err).To(BeNil(), "failed to create config secret") + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Namespace: autoScalingNS.Name, + MetricsBindAddress: "0", + }) + Expect(err).To(BeNil(), "failed to create manager") + + controller = &EphemeralRunnerReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ActionsClient: fake.NewMultiClient(), + } + + err = controller.SetupWithManager(mgr) + Expect(err).To(BeNil(), "failed to setup controller") + + ephemeralRunner = newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name) + err = k8sClient.Create(ctx, ephemeralRunner) + Expect(err).To(BeNil(), "failed to create ephemeral runner") + + go func() { + defer GinkgoRecover() + + err := mgr.Start(ctx) + Expect(err).To(BeNil(), "failed to start manager") + }() + }) + + AfterEach(func() { + defer cancel() + + err := k8sClient.Delete(ctx, autoScalingNS) + Expect(err).To(BeNil(), "failed to delete test namespace for EphemeralRunner") + }) + + It("It should create/add all required resources for EphemeralRunner (finalizer, jit secret)", func() { + created := new(v1alpha1.EphemeralRunner) + // Check if finalizer is added + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, created) + if err != nil { + return "", err + } + if len(created.Finalizers) == 0 { + return "", nil + } + return created.Finalizers[0], nil + }, + 
timeout, + interval, + ).Should(BeEquivalentTo(ephemeralRunnerFinalizerName)) + + Eventually( + func() (bool, error) { + secret := new(corev1.Secret) + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, secret); err != nil { + return false, err + } + + _, ok := secret.Data[jitTokenKey] + return ok, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + Eventually( + func() (string, error) { + pod := new(corev1.Pod) + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { + return "", err + } + + return pod.Name, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(ephemeralRunner.Name)) + }) + + It("It should re-create pod on failure", func() { + pod := new(corev1.Pod) + Eventually(func() (bool, error) { + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { + return false, err + } + return true, nil + }).Should(BeEquivalentTo(true)) + + err := k8sClient.Delete(ctx, pod) + Expect(err).To(BeNil(), "failed to delete pod") + + pod = new(corev1.Pod) + Eventually(func() (bool, error) { + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { + return false, err + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + }) + + It("It should clean up resources when deleted", func() { + // wait for pod to be created + pod := new(corev1.Pod) + Eventually(func() (bool, error) { + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { + return false, err + } + return true, nil + }).Should(BeEquivalentTo(true)) + + // create runner-linked pod + runnerLinkedPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-runner-linked-pod", + Namespace: 
ephemeralRunner.Namespace, + Labels: map[string]string{ + "runner-pod": ephemeralRunner.Name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner-linked-container", + Image: "ubuntu:latest", + }, + }, + }, + } + + err := k8sClient.Create(ctx, runnerLinkedPod) + Expect(err).To(BeNil(), "failed to create runner linked pod") + Eventually( + func() (bool, error) { + pod := new(corev1.Pod) + if err := k8sClient.Get(ctx, client.ObjectKey{Name: runnerLinkedPod.Name, Namespace: runnerLinkedPod.Namespace}, pod); err != nil { + return false, nil + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + // create runner linked secret + runnerLinkedSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-runner-linked-secret", + Namespace: ephemeralRunner.Namespace, + Labels: map[string]string{ + "runner-pod": ephemeralRunner.Name, + }, + }, + Data: map[string][]byte{"test": []byte("test")}, + } + + err = k8sClient.Create(ctx, runnerLinkedSecret) + Expect(err).To(BeNil(), "failed to create runner linked secret") + Eventually( + func() (bool, error) { + secret := new(corev1.Secret) + if err := k8sClient.Get(ctx, client.ObjectKey{Name: runnerLinkedSecret.Name, Namespace: runnerLinkedSecret.Namespace}, secret); err != nil { + return false, nil + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + err = k8sClient.Delete(ctx, ephemeralRunner) + Expect(err).To(BeNil(), "failed to delete ephemeral runner") + + Eventually( + func() (bool, error) { + pod := new(corev1.Pod) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod) + if err == nil { + return false, nil + } + return kerrors.IsNotFound(err), nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + Eventually( + func() (bool, error) { + secret := new(corev1.Secret) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, 
Namespace: ephemeralRunner.Namespace}, secret) + if err == nil { + return false, nil + } + return kerrors.IsNotFound(err), nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + Eventually( + func() (bool, error) { + pod := new(corev1.Pod) + err = k8sClient.Get(ctx, client.ObjectKey{Name: runnerLinkedPod.Name, Namespace: runnerLinkedPod.Namespace}, pod) + if err == nil { + return false, nil + } + return kerrors.IsNotFound(err), nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + Eventually( + func() (bool, error) { + secret := new(corev1.Secret) + err = k8sClient.Get(ctx, client.ObjectKey{Name: runnerLinkedSecret.Name, Namespace: runnerLinkedSecret.Namespace}, secret) + if err == nil { + return false, nil + } + return kerrors.IsNotFound(err), nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + Eventually( + func() (bool, error) { + secret := new(corev1.Secret) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, secret) + if err == nil { + return false, nil + } + return kerrors.IsNotFound(err), nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + Eventually( + func() (bool, error) { + updated := new(v1alpha1.EphemeralRunner) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) + if err == nil { + return false, nil + } + return kerrors.IsNotFound(err), nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + }) + + It("It should eventually have runner id set", func() { + Eventually( + func() (int, error) { + updatedEphemeralRunner := new(v1alpha1.EphemeralRunner) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updatedEphemeralRunner) + if err != nil { + return 0, err + } + return updatedEphemeralRunner.Status.RunnerId, nil + }, + timeout, + interval, + ).Should(BeNumerically(">", 0)) + }) + + It("It should 
patch the ephemeral runner non terminating status", func() { + pod := new(corev1.Pod) + Eventually( + func() (bool, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod) + if err != nil { + return false, err + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + for _, phase := range []corev1.PodPhase{corev1.PodRunning, corev1.PodPending} { + podCopy := pod.DeepCopy() + pod.Status.Phase = phase + // set container state to force status update + pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{ + Name: EphemeralRunnerContainerName, + State: corev1.ContainerState{}, + }) + err := k8sClient.Status().Patch(ctx, pod, client.MergeFrom(podCopy)) + Expect(err).To(BeNil(), "failed to patch pod status") + + Eventually( + func() (corev1.PodPhase, error) { + updated := new(v1alpha1.EphemeralRunner) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) + if err != nil { + return "", err + } + return updated.Status.Phase, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(phase)) + } + }) + + It("It should not update phase if container state does not exist", func() { + pod := new(corev1.Pod) + Eventually( + func() (bool, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod) + if err != nil { + return false, err + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + pod.Status.Phase = corev1.PodRunning + err := k8sClient.Status().Update(ctx, pod) + Expect(err).To(BeNil(), "failed to patch pod status") + + Consistently( + func() (corev1.PodPhase, error) { + updated := new(v1alpha1.EphemeralRunner) + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil { + return corev1.PodUnknown, 
err + } + return updated.Status.Phase, nil + }, + timeout, + ).Should(BeEquivalentTo("")) + }) + + It("It should not re-create pod indefinitely", func() { + pod := new(corev1.Pod) + failures := 0 + for i := 0; i < 6; i++ { + Eventually( + func() (bool, error) { + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { + return false, err + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{ + Name: EphemeralRunnerContainerName, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + }, + }, + }) + err := k8sClient.Status().Update(ctx, pod) + Expect(err).To(BeNil(), "Failed to update pod status") + + failures++ + + updated := new(v1alpha1.EphemeralRunner) + Eventually(func() (bool, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) + if err != nil { + return false, err + } + return len(updated.Status.Failures) == failures, nil + }, timeout, interval).Should(BeEquivalentTo(true)) + } + + Eventually(func() (bool, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod) + if err == nil { + return false, nil + } + return kerrors.IsNotFound(err), nil + }, timeout, interval).Should(BeEquivalentTo(true)) + }) + + It("It should re-create pod on eviction", func() { + pod := new(corev1.Pod) + Eventually( + func() (bool, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod) + if err != nil { + return false, err + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + pod.Status.Phase = corev1.PodFailed + pod.Status.Reason = "Evicted" + pod.Status.ContainerStatuses = 
append(pod.Status.ContainerStatuses, corev1.ContainerStatus{ + Name: EphemeralRunnerContainerName, + State: corev1.ContainerState{}, + }) + err := k8sClient.Status().Update(ctx, pod) + Expect(err).To(BeNil(), "failed to patch pod status") + + updated := new(v1alpha1.EphemeralRunner) + Eventually(func() (bool, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) + if err != nil { + return false, err + } + return len(updated.Status.Failures) == 1, nil + }, timeout, interval).Should(BeEquivalentTo(true)) + + // should re-create after failure + Eventually( + func() (bool, error) { + pod := new(corev1.Pod) + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { + return false, err + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + }) + + It("It should re-create pod on exit status 0, but runner exists within the service", func() { + pod := new(corev1.Pod) + Eventually( + func() (bool, error) { + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { + return false, err + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{ + Name: EphemeralRunnerContainerName, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + }) + err := k8sClient.Status().Update(ctx, pod) + Expect(err).To(BeNil(), "failed to update pod status") + + updated := new(v1alpha1.EphemeralRunner) + Eventually(func() (bool, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) + if err != nil { + return false, err + } + return len(updated.Status.Failures) == 1, nil + }, timeout, 
interval).Should(BeEquivalentTo(true)) + + // should re-create after failure + Eventually( + func() (bool, error) { + pod := new(corev1.Pod) + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { + return false, err + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + }) + + It("It should not set the phase to succeeded without pod termination status", func() { + pod := new(corev1.Pod) + Eventually( + func() (bool, error) { + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { + return false, err + } + return true, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(true)) + + // first set phase to running + pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{ + Name: EphemeralRunnerContainerName, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Now(), + }, + }, + }) + pod.Status.Phase = corev1.PodRunning + err := k8sClient.Status().Update(ctx, pod) + Expect(err).To(BeNil()) + + Eventually( + func() (corev1.PodPhase, error) { + updated := new(v1alpha1.EphemeralRunner) + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil { + return "", err + } + return updated.Status.Phase, nil + }, + timeout, + interval, + ).Should(BeEquivalentTo(corev1.PodRunning)) + + // set phase to succeeded + pod.Status.Phase = corev1.PodSucceeded + err = k8sClient.Status().Update(ctx, pod) + Expect(err).To(BeNil()) + + Consistently( + func() (corev1.PodPhase, error) { + updated := new(v1alpha1.EphemeralRunner) + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil { + return "", err + } + return updated.Status.Phase, nil + }, + timeout, + 
).Should(BeEquivalentTo(corev1.PodRunning)) + }) + }) + + Describe("Checking the API", func() { + var ctx context.Context + var cancel context.CancelFunc + + autoScalingNS := new(corev1.Namespace) + configSecret := new(corev1.Secret) + + var mgr manager.Manager + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) + autoScalingNS = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testns-autoscaling-runner" + RandStringRunes(5), + }, + } + err := k8sClient.Create(ctx, autoScalingNS) + Expect(err).To(BeNil(), "failed to create test namespace for EphemeralRunner") + + configSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: autoScalingNS.Name, + }, + Data: map[string][]byte{ + "github_token": []byte(gh_token), + }, + } + + err = k8sClient.Create(ctx, configSecret) + Expect(err).To(BeNil(), "failed to create config secret") + + mgr, err = ctrl.NewManager(cfg, ctrl.Options{ + Namespace: autoScalingNS.Name, + MetricsBindAddress: "0", + }) + Expect(err).To(BeNil(), "failed to create manager") + + }) + + AfterEach(func() { + defer cancel() + + err := k8sClient.Delete(ctx, autoScalingNS) + Expect(err).To(BeNil(), "failed to delete test namespace for EphemeralRunner") + }) + + It("It should set the Phase to Succeeded", func() { + controller := &EphemeralRunnerReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ActionsClient: fake.NewMultiClient( + fake.WithDefaultClient( + fake.NewFakeClient( + fake.WithGetRunner( + nil, + &actions.ActionsError{ + StatusCode: http.StatusNotFound, + ExceptionName: "AgentNotFoundException", + }, + ), + ), + nil, + ), + ), + } + + err := controller.SetupWithManager(mgr) + Expect(err).To(BeNil(), "failed to setup controller") + + go func() { + defer GinkgoRecover() + + err := mgr.Start(ctx) + Expect(err).To(BeNil(), "failed to start manager") + }() + + ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, 
configSecret.Name) + + err = k8sClient.Create(ctx, ephemeralRunner) + Expect(err).To(BeNil()) + + pod := new(corev1.Pod) + Eventually(func() (bool, error) { + if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { + return false, err + } + return true, nil + }, timeout, interval).Should(BeEquivalentTo(true)) + + pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{ + Name: EphemeralRunnerContainerName, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + }) + err = k8sClient.Status().Update(ctx, pod) + Expect(err).To(BeNil(), "failed to update pod status") + + updated := new(v1alpha1.EphemeralRunner) + Eventually(func() (corev1.PodPhase, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) + if err != nil { + return "", nil + } + return updated.Status.Phase, nil + }, timeout, interval).Should(BeEquivalentTo(corev1.PodSucceeded)) + }) + }) +}) diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go new file mode 100644 index 0000000000..ed0776a9be --- /dev/null +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -0,0 +1,463 @@ +/* +Copyright 2020 The actions-runner-controller authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package actionsgithubcom + +import ( + "context" + "errors" + "fmt" + "net/http" + "sort" + "strings" + + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/github/actions" + "github.com/go-logr/logr" + "go.uber.org/multierr" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +const ( + ephemeralRunnerSetReconcilerOwnerKey = ".metadata.controller" + ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer" +) + +// EphemeralRunnerSetReconciler reconciles a EphemeralRunnerSet object +type EphemeralRunnerSetReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + ActionsClient actions.MultiClient + + resourceBuilder resourceBuilder +} + +//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// The responsibility of this controller is to bring the state to the desired one, but it should +// avoid patching itself, because of the frequent patches that the listener is doing. +// The safe point where we can patch the resource is when we are reacting on finalizer. +// Then, the listener should be deleted first, to allow controller clean up resources without interruptions +// +// The resource should be created with finalizer. 
To leave it to this controller to add it, we would +// risk the same issue of patching the status. Responsibility of this controller should only +// be to bring the count of EphemeralRunners to the desired one, not to patch this resource +// until it is safe to do so +func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("ephemeralrunnerset", req.NamespacedName) + + ephemeralRunnerSet := new(v1alpha1.EphemeralRunnerSet) + if err := r.Get(ctx, req.NamespacedName, ephemeralRunnerSet); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Requested deletion does not need reconciled. + if !ephemeralRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() { + if controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) { + log.Info("Deleting resources") + done, err := r.cleanUpEphemeralRunners(ctx, ephemeralRunnerSet, log) + if err != nil { + log.Error(err, "Failed to clean up EphemeralRunners") + return ctrl.Result{}, err + } + if !done { + log.Info("Waiting for resources to be deleted") + return ctrl.Result{}, nil + } + + log.Info("Removing finalizer") + if err := patch(ctx, r.Client, ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { + controllerutil.RemoveFinalizer(obj, ephemeralRunnerSetFinalizerName) + }); err != nil && !kerrors.IsNotFound(err) { + log.Error(err, "Failed to update ephemeral runner set with removed finalizer") + return ctrl.Result{}, err + } + + log.Info("Successfully removed finalizer after cleanup") + return ctrl.Result{}, nil + } + return ctrl.Result{}, nil + } + + // Add finalizer if not present + if !controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) { + log.Info("Adding finalizer") + if err := patch(ctx, r.Client, ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { + controllerutil.AddFinalizer(obj, ephemeralRunnerSetFinalizerName) + }); err != nil { + log.Error(err, 
"Failed to update ephemeral runner set with finalizer added") + return ctrl.Result{}, err + } + + log.Info("Successfully added finalizer") + return ctrl.Result{}, nil + } + + // Find all EphemeralRunner with matching namespace and own by this EphemeralRunnerSet. + ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList) + err := r.List( + ctx, + ephemeralRunnerList, + client.InNamespace(req.Namespace), + client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: req.Name}, + ) + if err != nil { + log.Error(err, "Unable to list child ephemeral runners") + return ctrl.Result{}, err + } + + pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners := categorizeEphemeralRunners(ephemeralRunnerList) + + log.Info("Ephemeral runner counts", + "pending", len(pendingEphemeralRunners), + "running", len(runningEphemeralRunners), + "finished", len(finishedEphemeralRunners), + "failed", len(failedEphemeralRunners), + "deleting", len(deletingEphemeralRunners), + ) + + // cleanup finished runners and proceed + var errs []error + for i := range finishedEphemeralRunners { + log.Info("Deleting finished ephemeral runner", "name", finishedEphemeralRunners[i].Name) + if err := r.Delete(ctx, finishedEphemeralRunners[i]); err != nil { + if !kerrors.IsNotFound(err) { + errs = append(errs, err) + } + } + } + + if len(errs) > 0 { + mergedErrs := multierr.Combine(errs...) 
+ log.Error(mergedErrs, "Failed to delete finished ephemeral runners") + return ctrl.Result{}, mergedErrs + } + + total := len(pendingEphemeralRunners) + len(runningEphemeralRunners) + len(failedEphemeralRunners) + log.Info("Scaling comparison", "current", total, "desired", ephemeralRunnerSet.Spec.Replicas) + switch { + case total < ephemeralRunnerSet.Spec.Replicas: // Handle scale up + count := ephemeralRunnerSet.Spec.Replicas - total + log.Info("Creating new ephemeral runners (scale up)", "count", count) + if err := r.createEphemeralRunners(ctx, ephemeralRunnerSet, count, log); err != nil { + log.Error(err, "failed to make ephemeral runner") + return ctrl.Result{}, err + } + + case total > ephemeralRunnerSet.Spec.Replicas: // Handle scale down scenario. + count := total - ephemeralRunnerSet.Spec.Replicas + log.Info("Deleting ephemeral runners (scale down)", "count", count) + if err := r.deleteIdleEphemeralRunners(ctx, ephemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners, count, log); err != nil { + log.Error(err, "failed to delete idle runners") + return ctrl.Result{}, err + } + } + + // Update the status if needed. 
+ if ephemeralRunnerSet.Status.CurrentReplicas != total { + log.Info("Updating status with current runners count", "count", total) + if err := patch(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { + obj.Status.CurrentReplicas = total + }); err != nil { + log.Error(err, "Failed to update status with current runners count") + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + +func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (done bool, err error) { + ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList) + err = r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name}) + if err != nil { + return false, fmt.Errorf("failed to list child ephemeral runners: %v", err) + } + + // only if there are no ephemeral runners left, return true + if len(ephemeralRunnerList.Items) == 0 { + log.Info("All ephemeral runners are deleted") + return true, nil + } + + pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners := categorizeEphemeralRunners(ephemeralRunnerList) + + log.Info("Clean up runner counts", + "pending", len(pendingEphemeralRunners), + "running", len(runningEphemeralRunners), + "finished", len(finishedEphemeralRunners), + "failed", len(failedEphemeralRunners), + "deleting", len(deletingEphemeralRunners), + ) + + log.Info("Cleanup finished or failed ephemeral runners") + var errs []error + for _, ephemeralRunner := range append(finishedEphemeralRunners, failedEphemeralRunners...) { + log.Info("Deleting ephemeral runner", "name", ephemeralRunner.Name) + if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + mergedErrs := multierr.Combine(errs...) 
+ log.Error(mergedErrs, "Failed to delete ephemeral runners") + return false, mergedErrs + } + + // avoid fetching the client if we have nothing left to do + if len(runningEphemeralRunners) == 0 && len(pendingEphemeralRunners) == 0 { + return false, nil + } + + actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet) + if err != nil { + return false, err + } + + log.Info("Cleanup pending or running ephemeral runners") + errs = errs[0:0] + for _, ephemeralRunner := range append(pendingEphemeralRunners, runningEphemeralRunners...) { + log.Info("Removing the ephemeral runner from the service", "name", ephemeralRunner.Name) + _, err := r.deleteEphemeralRunnerWithActionsClient(ctx, ephemeralRunner, actionsClient, log) + if err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + mergedErrs := multierr.Combine(errs...) + log.Error(mergedErrs, "Failed to remove ephemeral runners from the service") + return false, mergedErrs + } + + return false, nil +} + +// createEphemeralRunners provisions `count` number of v1alpha1.EphemeralRunner resources in the cluster. +func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Context, runnerSet *v1alpha1.EphemeralRunnerSet, count int, log logr.Logger) error { + // Track multiple errors at once and return the bundle. + errs := make([]error, 0) + for i := 0; i < count; i++ { + ephemeralRunner := r.resourceBuilder.newEphemeralRunner(runnerSet) + + // Make sure that we own the resource we create. 
+ if err := ctrl.SetControllerReference(runnerSet, ephemeralRunner, r.Scheme); err != nil { + log.Error(err, "failed to set controller reference on ephemeral runner") + errs = append(errs, err) + continue + } + + log.Info("Creating new ephemeral runner", "progress", i+1, "total", count) + if err := r.Create(ctx, ephemeralRunner); err != nil { + log.Error(err, "failed to make ephemeral runner") + errs = append(errs, err) + continue + } + + log.Info("Created new ephemeral runner", "runner", ephemeralRunner.Name) + } + + return multierr.Combine(errs...) +} + +// deleteIdleEphemeralRunners try to deletes `count` number of v1alpha1.EphemeralRunner resources in the cluster. +// It will only delete `v1alpha1.EphemeralRunner` that has registered with Actions service +// which has a `v1alpha1.EphemeralRunner.Status.RunnerId` set. +// So, it is possible that this function will not delete enough ephemeral runners +// if there are not enough ephemeral runners that have registered with Actions service. +// When this happens, the next reconcile loop will try to delete the remaining ephemeral runners +// after we get notified by any of the `v1alpha1.EphemeralRunner.Status` updates. 
+func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners []*v1alpha1.EphemeralRunner, count int, log logr.Logger) error { + runners := newEphemeralRunnerStepper(pendingEphemeralRunners, runningEphemeralRunners) + if runners.len() == 0 { + log.Info("No pending or running ephemeral runners running at this time for scale down") + return nil + } + actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet) + if err != nil { + return fmt.Errorf("failed to create actions client for ephemeral runner replica set: %v", err) + } + var errs []error + deletedCount := 0 + for runners.next() { + ephemeralRunner := runners.object() + if ephemeralRunner.Status.RunnerId == 0 { + log.Info("Skipping ephemeral runner since it is not registered yet", "name", ephemeralRunner.Name) + continue + } + + if ephemeralRunner.Status.JobRequestId > 0 { + log.Info("Skipping ephemeral runner since it is running a job", "name", ephemeralRunner.Name, "jobRequestId", ephemeralRunner.Status.JobRequestId) + continue + } + + log.Info("Removing the idle ephemeral runner", "name", ephemeralRunner.Name) + ok, err := r.deleteEphemeralRunnerWithActionsClient(ctx, ephemeralRunner, actionsClient, log) + if err != nil { + errs = append(errs, err) + } + if !ok { + continue + } + + deletedCount++ + if deletedCount == count { + break + } + } + + return multierr.Combine(errs...) 
+} + +func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, actionsClient actions.ActionsService, log logr.Logger) (bool, error) { + if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil { + actionsError := &actions.ActionsError{} + if errors.As(err, &actionsError) && + actionsError.StatusCode == http.StatusBadRequest && + strings.Contains(actionsError.ExceptionName, "JobStillRunningException") { + // Runner is still running a job, proceed with the next one + return false, nil + } + + return false, err + } + + log.Info("Deleting ephemeral runner after removing from the service", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId) + if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) { + return false, err + } + + log.Info("Deleted ephemeral runner", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId) + return true, nil +} + +func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) (actions.ActionsService, error) { + secret := new(corev1.Secret) + if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil { + return nil, fmt.Errorf("failed to get secret: %w", err) + } + + return r.ActionsClient.GetClientFromSecret(ctx, rs.Spec.EphemeralRunnerSpec.GitHubConfigUrl, rs.Namespace, secret.Data) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { + // Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups. 
+ if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, ephemeralRunnerSetReconcilerOwnerKey, func(rawObj client.Object) []string { + groupVersion := v1alpha1.GroupVersion.String() + + // grab the job object, extract the owner... + ephemeralRunner := rawObj.(*v1alpha1.EphemeralRunner) + owner := metav1.GetControllerOf(ephemeralRunner) + if owner == nil { + return nil + } + + // ...make sure it is owned by this controller + if owner.APIVersion != groupVersion || owner.Kind != "EphemeralRunnerSet" { + return nil + } + + // ...and if so, return it + return []string{owner.Name} + }); err != nil { + return err + } + + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.EphemeralRunnerSet{}). + Owns(&v1alpha1.EphemeralRunner{}). + WithEventFilter(predicate.ResourceVersionChangedPredicate{}). + Complete(r) +} + +type ephemeralRunnerStepper struct { + items []*v1alpha1.EphemeralRunner + index int +} + +func newEphemeralRunnerStepper(pending, running []*v1alpha1.EphemeralRunner) *ephemeralRunnerStepper { + sort.Slice(pending, func(i, j int) bool { + return pending[i].GetCreationTimestamp().Time.Before(pending[j].GetCreationTimestamp().Time) + }) + sort.Slice(running, func(i, j int) bool { + return running[i].GetCreationTimestamp().Time.Before(running[j].GetCreationTimestamp().Time) + }) + + return &ephemeralRunnerStepper{ + items: append(pending, running...), + index: -1, + } +} + +func (s *ephemeralRunnerStepper) next() bool { + if s.index+1 < len(s.items) { + s.index++ + return true + } + return false +} + +func (s *ephemeralRunnerStepper) object() *v1alpha1.EphemeralRunner { + if s.index >= 0 && s.index < len(s.items) { + return s.items[s.index] + } + return nil +} + +func (s *ephemeralRunnerStepper) len() int { + return len(s.items) +} + +func categorizeEphemeralRunners(ephemeralRunnerList *v1alpha1.EphemeralRunnerList) (pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, 
deletingEphemeralRunners []*v1alpha1.EphemeralRunner) { + for i := range ephemeralRunnerList.Items { + r := &ephemeralRunnerList.Items[i] + if !r.ObjectMeta.DeletionTimestamp.IsZero() { + deletingEphemeralRunners = append(deletingEphemeralRunners, r) + continue + } + + switch r.Status.Phase { + case corev1.PodRunning: + runningEphemeralRunners = append(runningEphemeralRunners, r) + case corev1.PodSucceeded: + finishedEphemeralRunners = append(finishedEphemeralRunners, r) + case corev1.PodFailed: + failedEphemeralRunners = append(failedEphemeralRunners, r) + default: + // Pending or no phase should be considered as pending. + // + // If field is not set, that means that the EphemeralRunner + // did not yet have chance to update the Status.Phase field. + pendingEphemeralRunners = append(pendingEphemeralRunners, r) + } + } + return +} diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go new file mode 100644 index 0000000000..aa53504bbc --- /dev/null +++ b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -0,0 +1,445 @@ +package actionsgithubcom + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/github/actions/fake" +) + +const ( + ephemeralRunnerSetTestTimeout = time.Second * 5 + ephemeralRunnerSetTestInterval = time.Millisecond * 250 + ephemeralRunnerSetTestGitHubToken = "gh_token" +) + +var _ = Describe("Test EphemeralRunnerSet controller", func() { + var ctx context.Context + var cancel context.CancelFunc + autoScalingNS := new(corev1.Namespace) + ephemeralRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + configSecret := new(corev1.Secret) + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.TODO()) + autoScalingNS = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-runnerset" + RandStringRunes(5)}, + } + + err := k8sClient.Create(ctx, autoScalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for EphemeralRunnerSet") + + configSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: autoScalingNS.Name, + }, + Data: map[string][]byte{ + "github_token": []byte(ephemeralRunnerSetTestGitHubToken), + }, + } + + err = k8sClient.Create(ctx, configSecret) + Expect(err).NotTo(HaveOccurred(), "failed to create config secret") + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Namespace: autoScalingNS.Name, + MetricsBindAddress: "0", + }) + Expect(err).NotTo(HaveOccurred(), "failed to create manager") + + controller := &EphemeralRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ActionsClient: fake.NewMultiClient(), + } + err = controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoScalingNS.Name, + }, + Spec: 
actionsv1alpha1.EphemeralRunnerSetSpec{ + EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + RunnerScaleSetId: 100, + PodTemplateSpec: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + }, + } + + err = k8sClient.Create(ctx, ephemeralRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet") + + go func() { + defer GinkgoRecover() + + err := mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred(), "failed to start manager") + }() + }) + + AfterEach(func() { + defer cancel() + + err := k8sClient.Delete(ctx, autoScalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for EphemeralRunnerSet") + }) + + Context("When creating a new EphemeralRunnerSet", func() { + It("It should create/add all required resources for a new EphemeralRunnerSet (finalizer)", func() { + // Check if finalizer is added + created := new(actionsv1alpha1.EphemeralRunnerSet) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) + if err != nil { + return "", err + } + if len(created.Finalizers) == 0 { + return "", nil + } + return created.Finalizers[0], nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(ephemeralRunnerSetFinalizerName), "EphemeralRunnerSet should have a finalizer") + + // Check if the number of ephemeral runners are stay 0 + Consistently( + func() (int, error) { + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + 
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "No EphemeralRunner should be created") + + // Check if the status stay 0 + Consistently( + func() (int, error) { + runnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet) + if err != nil { + return -1, err + } + + return int(runnerSet.Status.CurrentReplicas), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "EphemeralRunnerSet status should be 0") + + // Scaling up the EphemeralRunnerSet + updated := created.DeepCopy() + updated.Spec.Replicas = 5 + err := k8sClient.Update(ctx, updated) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + // Check if the number of ephemeral runners are created + Eventually( + func() (int, error) { + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created") + + // Check if the status is updated + Eventually( + func() (int, error) { + runnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet) + if err != nil { + return -1, err + } + + return int(runnerSet.Status.CurrentReplicas), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "EphemeralRunnerSet status should be 5") + }) + }) + + Context("When deleting a new EphemeralRunnerSet", func() { + It("It should cleanup all resources for a deleting EphemeralRunnerSet before removing it", func() { + created := new(actionsv1alpha1.EphemeralRunnerSet) + err := 
k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + // Scale up the EphemeralRunnerSet + updated := created.DeepCopy() + updated.Spec.Replicas = 5 + err = k8sClient.Update(ctx, updated) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + // Wait for the EphemeralRunnerSet to be scaled up + Eventually( + func() (int, error) { + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created") + + // Delete the EphemeralRunnerSet + err = k8sClient.Delete(ctx, created) + Expect(err).NotTo(HaveOccurred(), "failed to delete EphemeralRunnerSet") + + // Check if all ephemeral runners are deleted + Eventually( + func() (int, error) { + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "All EphemeralRunner should be deleted") + + // Check if the EphemeralRunnerSet is deleted + Eventually( + func() error { + deleted := new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, deleted) + if err != nil { + if kerrors.IsNotFound(err) { + return nil + } + + return err + } + + return fmt.Errorf("EphemeralRunnerSet is not deleted") + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(Succeed(), "EphemeralRunnerSet should be deleted") + }) 
+ }) + + Context("When a new EphemeralRunnerSet scale up and down", func() { + It("It should delete finished EphemeralRunner and create new EphemeralRunner", func() { + created := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + // Scale up the EphemeralRunnerSet + updated := created.DeepCopy() + updated.Spec.Replicas = 5 + err = k8sClient.Update(ctx, updated) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + // Wait for the EphemeralRunnerSet to be scaled up + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created") + + // Set status to simulate a configured EphemeralRunner + for i, runner := range runnerList.Items { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + updatedRunner.Status.RunnerId = i + 100 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + } + + // Mark one of the EphemeralRunner as finished + finishedRunner := runnerList.Items[4].DeepCopy() + finishedRunner.Status.Phase = corev1.PodSucceeded + err = k8sClient.Status().Patch(ctx, finishedRunner, client.MergeFrom(&runnerList.Items[4])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + // Wait for the finished EphemeralRunner to be deleted + Eventually( + func() error { + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + err := k8sClient.List(ctx, runnerList, 
client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return err + } + + for _, runner := range runnerList.Items { + if runner.Name == finishedRunner.Name { + return fmt.Errorf("EphemeralRunner is not deleted") + } + } + + return nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(Succeed(), "Finished EphemeralRunner should be deleted") + + // We should still have the EphemeralRunnerSet scale up + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created") + + // Set status to simulate a configured EphemeralRunner + for i, runner := range runnerList.Items { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + updatedRunner.Status.RunnerId = i + 100 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + } + + // Scale down the EphemeralRunnerSet + updated = created.DeepCopy() + updated.Spec.Replicas = 3 + err = k8sClient.Patch(ctx, updated, client.MergeFrom(created)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + // Wait for the EphemeralRunnerSet to be scaled down + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(3), "3 EphemeralRunner should be created") + + // We will not scale down runner that is running jobs + 
runningRunner := runnerList.Items[0].DeepCopy() + runningRunner.Status.JobRequestId = 1000 + err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[0])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + runningRunner = runnerList.Items[1].DeepCopy() + runningRunner.Status.JobRequestId = 1001 + err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[0])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + // Scale down to 1 + updated = created.DeepCopy() + updated.Spec.Replicas = 1 + err = k8sClient.Patch(ctx, updated, client.MergeFrom(created)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + // Wait for the EphemeralRunnerSet to be scaled down to 2 since we still have 2 runner running jobs + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") + + // We will not scale down failed runner + failedRunner := runnerList.Items[0].DeepCopy() + failedRunner.Status.Phase = corev1.PodFailed + err = k8sClient.Status().Patch(ctx, failedRunner, client.MergeFrom(&runnerList.Items[0])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + // Scale down to 0 + updated = created.DeepCopy() + updated.Spec.Replicas = 0 + err = k8sClient.Patch(ctx, updated, client.MergeFrom(created)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + // We should not scale down the EphemeralRunnerSet since we still have 1 runner running job and 1 failed runner + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Consistently( + func() (int, error) { + err := 
k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") + + // We will scale down to 0 when the running job is completed and the failed runner is deleted + runningRunner = runnerList.Items[1].DeepCopy() + runningRunner.Status.Phase = corev1.PodSucceeded + err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[1])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + err = k8sClient.Delete(ctx, &runnerList.Items[0]) + Expect(err).NotTo(HaveOccurred(), "failed to delete EphemeralRunner") + + // Wait for the EphemeralRunnerSet to be scaled down to 0 + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "0 EphemeralRunner should be created") + }) + }) +}) diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go new file mode 100644 index 0000000000..45df0b4cda --- /dev/null +++ b/controllers/actions.github.com/resourcebuilder.go @@ -0,0 +1,437 @@ +package actionsgithubcom + +import ( + "context" + "fmt" + "math" + "strconv" + + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/hash" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + jitTokenKey = "jitToken" +) + +type resourceBuilder struct { +} + +func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener 
*v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret) *corev1.Pod { + newLabels := map[string]string{} + newLabels[scaleSetListenerLabel] = fmt.Sprintf("%v-%v", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, autoscalingListener.Spec.AutoscalingRunnerSetName) + + listenerEnv := []corev1.EnvVar{ + { + Name: "GITHUB_CONFIGURE_URL", + Value: autoscalingListener.Spec.GitHubConfigUrl, + }, + { + Name: "GITHUB_EPHEMERAL_RUNNER_SET_NAMESPACE", + Value: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + }, + { + Name: "GITHUB_EPHEMERAL_RUNNER_SET_NAME", + Value: autoscalingListener.Spec.EphemeralRunnerSetName, + }, + { + Name: "GITHUB_MAX_RUNNERS", + Value: strconv.Itoa(autoscalingListener.Spec.MaxRunners), + }, + { + Name: "GITHUB_MIN_RUNNERS", + Value: strconv.Itoa(autoscalingListener.Spec.MinRunners), + }, + { + Name: "GITHUB_RUNNER_SCALE_SET_ID", + Value: strconv.Itoa(autoscalingListener.Spec.RunnerScaleSetId), + }, + } + + if _, ok := secret.Data["github_token"]; ok { + listenerEnv = append(listenerEnv, corev1.EnvVar{ + Name: "GITHUB_TOKEN", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Key: "github_token", + }, + }, + }) + } + + if _, ok := secret.Data["github_app_id"]; ok { + listenerEnv = append(listenerEnv, corev1.EnvVar{ + Name: "GITHUB_APP_ID", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Key: "github_app_id", + }, + }, + }) + } + + if _, ok := secret.Data["github_app_installation_id"]; ok { + listenerEnv = append(listenerEnv, corev1.EnvVar{ + Name: "GITHUB_APP_INSTALLATION_ID", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Key: "github_app_installation_id", + }, + }, + }) + 
} + + if _, ok := secret.Data["github_app_private_key"]; ok { + listenerEnv = append(listenerEnv, corev1.EnvVar{ + Name: "GITHUB_APP_PRIVATE_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Key: "github_app_private_key", + }, + }, + }) + } + + podSpec := corev1.PodSpec{ + ServiceAccountName: serviceAccount.Name, + Containers: []corev1.Container{ + { + Name: name, + Image: autoscalingListener.Spec.Image, + Env: listenerEnv, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{ + "/github-runnerscaleset-listener", + }, + }, + }, + ImagePullSecrets: autoscalingListener.Spec.ImagePullSecrets, + RestartPolicy: corev1.RestartPolicyNever, + } + + newRunnerScaleSetListenerPod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: autoscalingListener.Name, + Namespace: autoscalingListener.Namespace, + Labels: newLabels, + }, + Spec: podSpec, + } + + return newRunnerScaleSetListenerPod +} + +func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) { + runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) + if err != nil { + return nil, err + } + runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() + + newLabels := map[string]string{} + newLabels[LabelKeyRunnerSpecHash] = runnerSpecHash + + newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-", + Namespace: autoscalingRunnerSet.ObjectMeta.Namespace, + Labels: newLabels, + }, + Spec: v1alpha1.EphemeralRunnerSetSpec{ + Replicas: 0, + EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ + RunnerScaleSetId: runnerScaleSetId, + GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl, + 
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret, + Proxy: autoscalingRunnerSet.Spec.Proxy, + GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS, + PodTemplateSpec: autoscalingRunnerSet.Spec.Template, + }, + }, + } + + return newEphemeralRunnerSet, nil +} + +func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: scaleSetListenerServiceAccountName(autoscalingListener), + Namespace: autoscalingListener.Namespace, + Labels: map[string]string{ + "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + "auto-scaling-runner-set-name": autoscalingListener.Spec.AutoscalingRunnerSetName, + }, + }, + } +} + +func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role { + rules := rulesForListenerRole([]string{autoscalingListener.Spec.EphemeralRunnerSetName}) + rulesHash := hash.ComputeTemplateHash(&rules) + newRole := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: scaleSetListenerRoleName(autoscalingListener), + Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + Labels: map[string]string{ + "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + "auto-scaling-runner-set-name": autoscalingListener.Spec.AutoscalingRunnerSetName, + "auto-scaling-listener-namespace": autoscalingListener.Namespace, + "auto-scaling-listener-name": autoscalingListener.Name, + "role-policy-rules-hash": rulesHash, + }, + }, + Rules: rules, + } + + return newRole +} + +func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding { + roleRef := rbacv1.RoleRef{ + Kind: "Role", + Name: listenerRole.Name, + } + roleRefHash := 
hash.ComputeTemplateHash(&roleRef)
+
+	subjects := []rbacv1.Subject{
+		{
+			Kind:      "ServiceAccount",
+			Namespace: serviceAccount.Namespace,
+			Name:      serviceAccount.Name,
+		},
+	}
+	subjectHash := hash.ComputeTemplateHash(&subjects)
+
+	newRoleBinding := &rbacv1.RoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      scaleSetListenerRoleName(autoscalingListener),
+			Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+			Labels: map[string]string{
+				"auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+				"auto-scaling-runner-set-name":      autoscalingListener.Spec.AutoscalingRunnerSetName,
+				"auto-scaling-listener-namespace":   autoscalingListener.Namespace,
+				"auto-scaling-listener-name":        autoscalingListener.Name,
+				"role-binding-role-ref-hash":        roleRefHash,
+				"role-binding-subject-hash":         subjectHash,
+			},
+		},
+		RoleRef:  roleRef,
+		Subjects: subjects,
+	}
+
+	return newRoleBinding
+}
+
+func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
+	dataHash := hash.ComputeTemplateHash(&secret.Data)
+
+	newListenerSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      scaleSetListenerSecretMirrorName(autoscalingListener),
+			Namespace: autoscalingListener.Namespace,
+			Labels: map[string]string{
+				"auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
+				"auto-scaling-runner-set-name":      autoscalingListener.Spec.AutoscalingRunnerSetName,
+				"secret-data-hash":                  dataHash,
+			},
+		},
+		Data: secret.DeepCopy().Data,
+	}
+
+	return newListenerSecret
+}
+
+func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
+	runnerScaleSetId, err := 
strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) + if err != nil { + return nil, err + } + + effectiveMinRunners := 0 + effectiveMaxRunners := math.MaxInt32 + if autoscalingRunnerSet.Spec.MaxRunners != nil { + effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners + } + if autoscalingRunnerSet.Spec.MinRunners != nil { + effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners + } + + autoscalingListener := &v1alpha1.AutoscalingListener{ + ObjectMeta: metav1.ObjectMeta{ + Name: scaleSetListenerName(autoscalingRunnerSet), + Namespace: namespace, + Labels: map[string]string{ + "auto-scaling-runner-set-namespace": autoscalingRunnerSet.Namespace, + "auto-scaling-runner-set-name": autoscalingRunnerSet.Name, + LabelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), + }, + }, + Spec: v1alpha1.AutoscalingListenerSpec{ + GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl, + GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret, + RunnerScaleSetId: runnerScaleSetId, + AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace, + AutoscalingRunnerSetName: autoscalingRunnerSet.Name, + EphemeralRunnerSetName: ephemeralRunnerSet.Name, + MinRunners: effectiveMinRunners, + MaxRunners: effectiveMaxRunners, + Image: image, + ImagePullSecrets: imagePullSecrets, + }, + } + + return autoscalingListener, nil +} + +func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner { + return &v1alpha1.EphemeralRunner{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: ephemeralRunnerSet.Name + "-runner-", + Namespace: ephemeralRunnerSet.Namespace, + }, + Spec: ephemeralRunnerSet.Spec.EphemeralRunnerSpec, + } +} + +func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret) *corev1.Pod { + var newPod corev1.Pod + + labels := map[string]string{} + annotations := map[string]string{} + + 
for k, v := range runner.ObjectMeta.Labels { + labels[k] = v + } + for k, v := range runner.Spec.PodTemplateSpec.Labels { + labels[k] = v + } + + for k, v := range runner.ObjectMeta.Annotations { + annotations[k] = v + } + for k, v := range runner.Spec.PodTemplateSpec.Annotations { + annotations[k] = v + } + + labels[LabelKeyPodTemplateHash] = hash.FNVHashStringObjects( + FilterLabels(labels, LabelKeyRunnerTemplateHash), + annotations, + runner.Spec, + runner.Status.RunnerJITConfig, + ) + + labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue) + + objectMeta := metav1.ObjectMeta{ + Name: runner.ObjectMeta.Name, + Namespace: runner.ObjectMeta.Namespace, + Labels: labels, + Annotations: annotations, + } + + newPod.ObjectMeta = objectMeta + newPod.Spec = runner.Spec.PodTemplateSpec.Spec + newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.PodTemplateSpec.Spec.Containers)) + + for _, c := range runner.Spec.PodTemplateSpec.Spec.Containers { + if c.Name == EphemeralRunnerContainerName { + c.Env = append(c.Env, corev1.EnvVar{ + Name: EnvVarRunnerJITConfig, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Key: jitTokenKey, + }, + }, + }) + } + + newPod.Spec.Containers = append(newPod.Spec.Containers, c) + } + + return &newPod +} + +func (b *resourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: ephemeralRunner.Name, + Namespace: ephemeralRunner.Namespace, + }, + Data: map[string][]byte{ + jitTokenKey: []byte(ephemeralRunner.Status.RunnerJITConfig), + }, + } +} + +func scaleSetListenerName(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) string { + namespaceHash := hash.FNVHashString(autoscalingRunnerSet.Namespace) + if len(namespaceHash) > 8 { + namespaceHash = namespaceHash[:8] + } + return 
fmt.Sprintf("%v-%v-listener", autoscalingRunnerSet.Name, namespaceHash) +} + +func scaleSetListenerServiceAccountName(autoscalingListener *v1alpha1.AutoscalingListener) string { + namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace) + if len(namespaceHash) > 8 { + namespaceHash = namespaceHash[:8] + } + return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash) +} + +func scaleSetListenerRoleName(autoscalingListener *v1alpha1.AutoscalingListener) string { + namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace) + if len(namespaceHash) > 8 { + namespaceHash = namespaceHash[:8] + } + return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash) +} + +func scaleSetListenerSecretMirrorName(autoscalingListener *v1alpha1.AutoscalingListener) string { + namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace) + if len(namespaceHash) > 8 { + namespaceHash = namespaceHash[:8] + } + return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash) +} + +func rulesForListenerRole(resourceNames []string) []rbacv1.PolicyRule { + return []rbacv1.PolicyRule{ + { + APIGroups: []string{"actions.github.com"}, + Resources: []string{"ephemeralrunnersets"}, + ResourceNames: resourceNames, + Verbs: []string{"patch"}, + }, + { + APIGroups: []string{"actions.github.com"}, + Resources: []string{"ephemeralrunners", "ephemeralrunners/status"}, + Verbs: []string{"patch"}, + }, + } +} diff --git a/controllers/actions.github.com/suite_test.go b/controllers/actions.github.com/suite_test.go new file mode 100644 index 0000000000..29b07f0de5 --- /dev/null +++ b/controllers/actions.github.com/suite_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2020 The actions-runner-controller authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package actionsgithubcom + +import ( + "os" + "path/filepath" + "testing" + + "github.com/onsi/ginkgo/config" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + config.GinkgoConfig.FocusStrings = append(config.GinkgoConfig.FocusStrings, os.Getenv("GINKGO_FOCUS")) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{printer.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("../..", "config", "crd", "bases")}, + } + + // Avoids the following error: + // 2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller Reconciler error {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"} + testEnv.ControlPlane.GetAPIServer().Configure(). 
+ Append("allow-privileged", "true") + + var err error + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = actionsv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/controllers/actions.github.com/utils.go b/controllers/actions.github.com/utils.go new file mode 100644 index 0000000000..a77b24ba17 --- /dev/null +++ b/controllers/actions.github.com/utils.go @@ -0,0 +1,27 @@ +package actionsgithubcom + +import ( + "k8s.io/apimachinery/pkg/util/rand" +) + +func FilterLabels(labels map[string]string, filter string) map[string]string { + filtered := map[string]string{} + + for k, v := range labels { + if k != filter { + filtered[k] = v + } + } + + return filtered +} + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") + +func RandStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/controllers/actions.github.com/utils_test.go b/controllers/actions.github.com/utils_test.go new file mode 100644 index 0000000000..9e98b981bd --- /dev/null +++ b/controllers/actions.github.com/utils_test.go @@ -0,0 +1,34 @@ +package actionsgithubcom + +import ( + "reflect" + "testing" +) + +func Test_filterLabels(t *testing.T) { + type args struct { + labels map[string]string + filter string + } + tests := []struct { + name string + args args + want map[string]string + }{ + { + name: "ok", + args: args{ + labels: map[string]string{LabelKeyRunnerTemplateHash: "abc", LabelKeyPodTemplateHash: "def"}, + filter: LabelKeyRunnerTemplateHash, + }, + want: 
map[string]string{LabelKeyPodTemplateHash: "def"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := FilterLabels(tt.args.labels, tt.args.filter); !reflect.DeepEqual(got, tt.want) { + t.Errorf("FilterLabels() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/github/actions/client.go b/github/actions/client.go new file mode 100644 index 0000000000..8e0290869e --- /dev/null +++ b/github/actions/client.go @@ -0,0 +1,1101 @@ +package actions + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-logr/logr" + "github.com/golang-jwt/jwt/v4" + "github.com/google/uuid" + "github.com/hashicorp/go-retryablehttp" +) + +const ( + runnerEndpoint = "_apis/distributedtask/pools/0/agents" + scaleSetEndpoint = "_apis/runtime/runnerscalesets" + apiVersionQueryParam = "api-version=6.0-preview" +) + +//go:generate mockery --inpackage --name=ActionsService +type ActionsService interface { + GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*RunnerScaleSet, error) + GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int) (*RunnerScaleSet, error) + GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*RunnerGroup, error) + CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) + + CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*RunnerScaleSetSession, error) + DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error + RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*RunnerScaleSetSession, error) + + AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) + GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*AcquirableJobList, error) + + GetMessage(ctx 
context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64) (*RunnerScaleSetMessage, error) + DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error + + GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *RunnerScaleSetJitRunnerSetting, scaleSetId int) (*RunnerScaleSetJitRunnerConfig, error) + + GetRunner(ctx context.Context, runnerId int64) (*RunnerReference, error) + GetRunnerByName(ctx context.Context, runnerName string) (*RunnerReference, error) + RemoveRunner(ctx context.Context, runnerId int64) error +} + +type Client struct { + *http.Client + + // lock for refreshing the ActionsServiceAdminToken and ActionsServiceAdminTokenExpiresAt + mu sync.Mutex + + // TODO: Convert to unexported fields once refactor of Listener is complete + ActionsServiceAdminToken *string + ActionsServiceAdminTokenExpiresAt *time.Time + ActionsServiceURL *string + + RetryMax *int + RetryWaitMax *time.Duration + + creds *ActionsAuth + githubConfigURL string + logger logr.Logger + userAgent string +} + +func NewClient(ctx context.Context, githubConfigURL string, creds *ActionsAuth, userAgent string, logger logr.Logger) (ActionsService, error) { + ac := &Client{ + creds: creds, + githubConfigURL: githubConfigURL, + logger: logger, + userAgent: userAgent, + } + + rt, err := ac.getRunnerRegistrationToken(ctx, githubConfigURL, *creds) + if err != nil { + return nil, fmt.Errorf("failed to get runner registration token: %w", err) + } + + adminConnInfo, err := ac.getActionsServiceAdminConnection(ctx, rt, githubConfigURL) + if err != nil { + return nil, fmt.Errorf("failed to get actions service admin connection: %w", err) + } + + ac.ActionsServiceURL = adminConnInfo.ActionsServiceUrl + + ac.mu.Lock() + defer ac.mu.Unlock() + ac.ActionsServiceAdminToken = adminConnInfo.AdminToken + ac.ActionsServiceAdminTokenExpiresAt, err = actionsServiceAdminTokenExpiresAt(*adminConnInfo.AdminToken) + if err != nil { + 
return nil, fmt.Errorf("failed to get admin token expire at: %w", err) + } + + return ac, nil +} + +func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*RunnerScaleSet, error) { + u := fmt.Sprintf("%s/%s?name=%s&api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetName) + + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, ParseActionsErrorFromResponse(resp) + } + var runnerScaleSetList *runnerScaleSetsResponse + err = unmarshalBody(resp, &runnerScaleSetList) + if err != nil { + return nil, err + } + if runnerScaleSetList.Count == 0 { + return nil, nil + } + if runnerScaleSetList.Count > 1 { + return nil, fmt.Errorf("multiple runner scale sets found with name %s", runnerScaleSetName) + } + + return &runnerScaleSetList.RunnerScaleSets[0], nil +} + +func (c *Client) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int) (*RunnerScaleSet, error) { + u := fmt.Sprintf("%s/%s/%d?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) + + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", 
*c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, ParseActionsErrorFromResponse(resp) + } + + var runnerScaleSet *RunnerScaleSet + err = unmarshalBody(resp, &runnerScaleSet) + if err != nil { + return nil, err + } + return runnerScaleSet, nil + +} + +func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*RunnerGroup, error) { + u := fmt.Sprintf("%s/_apis/runtime/runnergroups/?groupName=%s&api-version=6.0-preview", *c.ActionsServiceURL, runnerGroup) + + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("unexpected status code: %d - body: %s", resp.StatusCode, string(body)) + } + + var runnerGroupList *RunnerGroupList + err = unmarshalBody(resp, &runnerGroupList) + if err != nil { + return nil, err + } + + if runnerGroupList.Count == 0 { + return nil, nil + } + + if runnerGroupList.Count > 1 { + return nil, fmt.Errorf("multiple runner group found with name %s", runnerGroup) + } + + return &runnerGroupList.RunnerGroups[0], nil +} + +func (c *Client) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) { + u := 
fmt.Sprintf("%s/%s?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint) + + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + body, err := json.Marshal(runnerScaleSet) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, ParseActionsErrorFromResponse(resp) + } + var createdRunnerScaleSet *RunnerScaleSet + err = unmarshalBody(resp, &createdRunnerScaleSet) + if err != nil { + return nil, err + } + return createdRunnerScaleSet, nil +} + +func (c *Client) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) { + u := fmt.Sprintf("%s/%s/%d?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) + + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + body, err := json.Marshal(runnerScaleSet) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPut, u, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + if 
resp.StatusCode != http.StatusOK {
+		return nil, ParseActionsErrorFromResponse(resp)
+	}
+
+	var createdRunnerScaleSet *RunnerScaleSet
+	err = unmarshalBody(resp, &createdRunnerScaleSet)
+	if err != nil {
+		return nil, err
+	}
+	return createdRunnerScaleSet, nil
+}
+
+func (c *Client) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int) error {
+	u := fmt.Sprintf("%s/%s/%d?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId)
+
+	if err := c.refreshTokenIfNeeded(ctx); err != nil {
+		return fmt.Errorf("failed to refresh admin token if needed: %w", err)
+	}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodDelete, u, nil)
+	if err != nil {
+		return err
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken))
+
+	if c.userAgent != "" {
+		req.Header.Set("User-Agent", c.userAgent)
+	}
+
+	httpClient := c.getHTTPClient()
+
+	resp, err := httpClient.Do(req)
+	if err != nil {
+		return err
+	}
+
+	if resp.StatusCode != http.StatusNoContent {
+		return ParseActionsErrorFromResponse(resp)
+	}
+
+	defer resp.Body.Close()
+	return nil
+}
+
+func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64) (*RunnerScaleSetMessage, error) {
+	u := messageQueueUrl
+	if lastMessageId > 0 {
+		u = fmt.Sprintf("%s&lastMessageId=%d", messageQueueUrl, lastMessageId)
+	}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("Accept", "application/json; api-version=6.0-preview")
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", messageQueueAccessToken))
+	if c.userAgent != "" {
+		req.Header.Set("User-Agent", c.userAgent)
+	}
+
+	httpClient := c.getHTTPClient()
+
+	resp, err := httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode == http.StatusAccepted {
+		defer resp.Body.Close()
+	
return nil, nil + } + + if resp.StatusCode != http.StatusOK { + if resp.StatusCode != http.StatusUnauthorized { + return nil, ParseActionsErrorFromResponse(resp) + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + body = trimByteOrderMark(body) + if err != nil { + return nil, err + } + return nil, &MessageQueueTokenExpiredError{msg: string(body)} + } + + var message *RunnerScaleSetMessage + err = unmarshalBody(resp, &message) + if err != nil { + return nil, err + } + return message, nil +} + +func (c *Client) DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error { + u, err := url.Parse(messageQueueUrl) + if err != nil { + return err + } + + u.Path = fmt.Sprintf("%s/%d", u.Path, messageId) + + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, u.String(), nil) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", messageQueueAccessToken)) + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + if resp.StatusCode != http.StatusUnauthorized { + return ParseActionsErrorFromResponse(resp) + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + body = trimByteOrderMark(body) + if err != nil { + return err + } + return &MessageQueueTokenExpiredError{msg: string(body)} + } + return nil +} + +func (c *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*RunnerScaleSetSession, error) { + u := fmt.Sprintf("%v/%v/%v/sessions?%v", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId, apiVersionQueryParam) + + newSession := &RunnerScaleSetSession{ + OwnerName: owner, + } + + requestData, err := json.Marshal(newSession) + if err != nil { + return nil, err + } + + createdSession 
:= &RunnerScaleSetSession{} + + err = c.doSessionRequest(ctx, http.MethodPost, u, bytes.NewBuffer(requestData), http.StatusOK, createdSession) + + return createdSession, err +} + +func (c *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error { + u := fmt.Sprintf("%v/%v/%v/sessions/%v?%v", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId, sessionId.String(), apiVersionQueryParam) + + return c.doSessionRequest(ctx, http.MethodDelete, u, nil, http.StatusNoContent, nil) +} + +func (c *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*RunnerScaleSetSession, error) { + u := fmt.Sprintf("%v/%v/%v/sessions/%v?%v", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId, sessionId.String(), apiVersionQueryParam) + refreshedSession := &RunnerScaleSetSession{} + err := c.doSessionRequest(ctx, http.MethodPatch, u, nil, http.StatusOK, refreshedSession) + return refreshedSession, err +} + +func (c *Client) doSessionRequest(ctx context.Context, method, url string, requestData io.Reader, expectedResponseStatusCode int, responseUnmarshalTarget any) error { + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, method, url, requestData) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return err + } + + if resp.StatusCode == expectedResponseStatusCode && responseUnmarshalTarget != nil { + err = unmarshalBody(resp, &responseUnmarshalTarget) + return err + } + + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + return ParseActionsErrorFromResponse(resp) + } + + defer 
resp.Body.Close() + body, err := io.ReadAll(resp.Body) + body = trimByteOrderMark(body) + if err != nil { + return err + } + + return fmt.Errorf("unexpected status code: %d - body: %s", resp.StatusCode, string(body)) +} + +func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) { + u := fmt.Sprintf("%s/%s/%d/acquirejobs?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) + + body, err := json.Marshal(requestIds) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", messageQueueAccessToken)) + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, ParseActionsErrorFromResponse(resp) + } + + var acquiredJobs Int64List + err = unmarshalBody(resp, &acquiredJobs) + if err != nil { + return nil, err + } + + return acquiredJobs.Value, nil +} + +func (c *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*AcquirableJobList, error) { + u := fmt.Sprintf("%s/%s/%d/acquirablejobs?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) + + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient 
:= c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusNoContent { + defer resp.Body.Close() + return &AcquirableJobList{Count: 0, Jobs: []AcquirableJob{}}, nil + } + + if resp.StatusCode != http.StatusOK { + return nil, ParseActionsErrorFromResponse(resp) + } + + var acquirableJobList *AcquirableJobList + err = unmarshalBody(resp, &acquirableJobList) + if err != nil { + return nil, err + } + + return acquirableJobList, nil +} + +func (c *Client) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *RunnerScaleSetJitRunnerSetting, scaleSetId int) (*RunnerScaleSetJitRunnerConfig, error) { + runnerJitConfigUrl := fmt.Sprintf("%s/%s/%d/generatejitconfig?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, scaleSetId) + + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + body, err := json.Marshal(jitRunnerSetting) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, runnerJitConfigUrl, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, ParseActionsErrorFromResponse(resp) + } + + var runnerJitConfig *RunnerScaleSetJitRunnerConfig + err = unmarshalBody(resp, &runnerJitConfig) + if err != nil { + return nil, err + } + return runnerJitConfig, nil +} + +func (c *Client) GetRunner(ctx context.Context, runnerId int64) (*RunnerReference, error) { + url := fmt.Sprintf("%v/%v/%v?%v", *c.ActionsServiceURL, runnerEndpoint, runnerId, apiVersionQueryParam) + 
+ if err := c.refreshTokenIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, ParseActionsErrorFromResponse(resp) + } + + var runnerReference *RunnerReference + if err := unmarshalBody(resp, &runnerReference); err != nil { + return nil, err + } + + return runnerReference, nil +} + +func (c *Client) GetRunnerByName(ctx context.Context, runnerName string) (*RunnerReference, error) { + url := fmt.Sprintf("%v/%v?agentName=%v&%v", *c.ActionsServiceURL, runnerEndpoint, runnerName, apiVersionQueryParam) + + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, ParseActionsErrorFromResponse(resp) + } + + var runnerList *RunnerReferenceList + err = unmarshalBody(resp, &runnerList) + if err != nil { + return nil, err + } + + if runnerList.Count == 0 { + return nil, nil + } + + if runnerList.Count > 1 { + return nil, fmt.Errorf("multiple runner found with name 
%s", runnerName) + } + + return &runnerList.RunnerReferences[0], nil +} + +func (c *Client) RemoveRunner(ctx context.Context, runnerId int64) error { + url := fmt.Sprintf("%v/%v/%v?%v", *c.ActionsServiceURL, runnerEndpoint, runnerId, apiVersionQueryParam) + + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + httpClient := c.getHTTPClient() + + resp, err := httpClient.Do(req) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + return ParseActionsErrorFromResponse(resp) + } + + defer resp.Body.Close() + return nil +} + +type registrationToken struct { + Token *string `json:"token,omitempty"` + ExpiresAt *time.Time `json:"expires_at,omitempty"` +} + +func (c *Client) getRunnerRegistrationToken(ctx context.Context, githubConfigUrl string, creds ActionsAuth) (*registrationToken, error) { + registrationTokenURL, err := createRegistrationTokenURL(githubConfigUrl) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + req, err := http.NewRequestWithContext(ctx, http.MethodPost, registrationTokenURL, &buf) + if err != nil { + return nil, err + } + + bearerToken := "" + + if creds.Token != "" { + encodedToken := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("github:%v", creds.Token))) + bearerToken = fmt.Sprintf("Basic %v", encodedToken) + } else { + accessToken, err := c.fetchAccessToken(ctx, githubConfigUrl, creds.AppCreds) + if err != nil { + return nil, err + } + + bearerToken = fmt.Sprintf("Bearer %v", accessToken.Token) + } + + req.Header.Set("Content-Type", "application/vnd.github.v3+json") + 
req.Header.Set("Authorization", bearerToken) + req.Header.Set("User-Agent", c.userAgent) + + c.logger.Info("getting runner registration token", "registrationTokenURL", registrationTokenURL) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("unexpected response from Actions service during registration token call: %v - %v", resp.StatusCode, string(body)) + } + + registrationToken := ®istrationToken{} + if err := json.NewDecoder(resp.Body).Decode(registrationToken); err != nil { + return nil, err + } + + return registrationToken, nil +} + +// Format: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app +type accessToken struct { + Token string `json:"token"` + ExpiresAt time.Time `json:"expires_at"` +} + +func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, creds *GitHubAppAuth) (*accessToken, error) { + parsedGitHubConfigURL, err := url.Parse(gitHubConfigURL) + if err != nil { + return nil, err + } + + accessTokenJWT, err := createJWTForGitHubApp(creds) + if err != nil { + return nil, err + } + + ru := fmt.Sprintf("%v://%v/app/installations/%v/access_tokens", parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host, creds.AppInstallationID) + accessTokenURL, err := url.Parse(ru) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, accessTokenURL.String(), nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/vnd.github+json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", accessTokenJWT)) + req.Header.Add("User-Agent", c.userAgent) + + c.logger.Info("getting access token for GitHub App auth", "accessTokenURL", accessTokenURL.String()) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + 
return nil, err + } + defer resp.Body.Close() + + // Format: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app + accessToken := &accessToken{} + err = json.NewDecoder(resp.Body).Decode(accessToken) + return accessToken, err +} + +type ActionsServiceAdminConnection struct { + ActionsServiceUrl *string `json:"url,omitempty"` + AdminToken *string `json:"token,omitempty"` +} + +func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *registrationToken, githubConfigUrl string) (*ActionsServiceAdminConnection, error) { + parsedGitHubConfigURL, err := url.Parse(githubConfigUrl) + if err != nil { + return nil, err + } + + if isHostedServer(*parsedGitHubConfigURL) { + parsedGitHubConfigURL.Host = fmt.Sprintf("api.%v", parsedGitHubConfigURL.Host) + } + + ru := fmt.Sprintf("%v://%v/actions/runner-registration", parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host) + registrationURL, err := url.Parse(ru) + if err != nil { + return nil, err + } + + body := struct { + Url string `json:"url"` + RunnerEvent string `json:"runner_event"` + }{ + Url: githubConfigUrl, + RunnerEvent: "register", + } + + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + + if err := enc.Encode(body); err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, registrationURL.String(), buf) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("RemoteAuth %s", *rt.Token)) + req.Header.Set("User-Agent", c.userAgent) + + c.logger.Info("getting Actions tenant URL and JWT", "registrationURL", registrationURL.String()) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + actionsServiceAdminConnection := &ActionsServiceAdminConnection{} + if err := json.NewDecoder(resp.Body).Decode(actionsServiceAdminConnection); err != nil { + return 
nil, err + } + + return actionsServiceAdminConnection, nil +} + +func isHostedServer(gitHubURL url.URL) bool { + return gitHubURL.Host == "github.com" || + gitHubURL.Host == "www.github.com" || + gitHubURL.Host == "github.localhost" +} + +func createRegistrationTokenURL(githubConfigUrl string) (string, error) { + parsedGitHubConfigURL, err := url.Parse(githubConfigUrl) + if err != nil { + return "", err + } + + // Check for empty path before split, because strings.Split will return a slice of length 1 + // when the split delimiter is not present. + trimmedPath := strings.TrimLeft(parsedGitHubConfigURL.Path, "/") + if len(trimmedPath) == 0 { + return "", fmt.Errorf("%q should point to an enterprise, org, or repository", parsedGitHubConfigURL.String()) + } + + pathParts := strings.Split(path.Clean(strings.TrimLeft(parsedGitHubConfigURL.Path, "/")), "/") + + switch len(pathParts) { + case 1: // Organization + registrationTokenURL := fmt.Sprintf( + "%v://%v/api/v3/orgs/%v/actions/runners/registration-token", + parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host, pathParts[0]) + + if isHostedServer(*parsedGitHubConfigURL) { + registrationTokenURL = fmt.Sprintf( + "%v://api.%v/orgs/%v/actions/runners/registration-token", + parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host, pathParts[0]) + } + + return registrationTokenURL, nil + case 2: // Repository or enterprise + repoScope := "repos/" + if strings.ToLower(pathParts[0]) == "enterprises" { + repoScope = "" + } + + registrationTokenURL := fmt.Sprintf("%v://%v/api/v3/%v%v/%v/actions/runners/registration-token", + parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host, repoScope, pathParts[0], pathParts[1]) + + if isHostedServer(*parsedGitHubConfigURL) { + registrationTokenURL = fmt.Sprintf("%v://api.%v/%v%v/%v/actions/runners/registration-token", + parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host, repoScope, pathParts[0], pathParts[1]) + } + + return registrationTokenURL, nil + default: + return "", 
fmt.Errorf("%q should point to an enterprise, org, or repository", parsedGitHubConfigURL.String()) + } +} + +func createJWTForGitHubApp(appAuth *GitHubAppAuth) (string, error) { + // Encode as JWT + // See https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app + + // Going back in time a bit helps with clock skew. + issuedAt := time.Now().Add(-60 * time.Second) + // Max expiration date is 10 minutes. + expiresAt := issuedAt.Add(9 * time.Minute) + claims := &jwt.RegisteredClaims{ + IssuedAt: jwt.NewNumericDate(issuedAt), + ExpiresAt: jwt.NewNumericDate(expiresAt), + Issuer: strconv.FormatInt(appAuth.AppID, 10), + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) + + privateKey, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(appAuth.AppPrivateKey)) + if err != nil { + return "", err + } + + return token.SignedString(privateKey) +} + +func (c *Client) getHTTPClient() *http.Client { + if c.Client != nil { + return c.Client + } + + retryClient := retryablehttp.NewClient() + + if c.RetryMax != nil { + retryClient.RetryMax = *c.RetryMax + } + + if c.RetryWaitMax != nil { + retryClient.RetryWaitMax = *c.RetryWaitMax + } + + return retryClient.StandardClient() +} + +func unmarshalBody(response *http.Response, v interface{}) (err error) { + if response != nil && response.Body != nil { + var err error + defer func() { + if closeError := response.Body.Close(); closeError != nil { + err = closeError + } + }() + body, err := io.ReadAll(response.Body) + if err != nil { + return err + } + body = trimByteOrderMark(body) + return json.Unmarshal(body, &v) + } + return nil +} + +// Returns slice of body without utf-8 byte order mark. +// If BOM does not exist body is returned unchanged. 
+func trimByteOrderMark(body []byte) []byte { + return bytes.TrimPrefix(body, []byte("\xef\xbb\xbf")) +} + +func actionsServiceAdminTokenExpiresAt(jwtToken string) (*time.Time, error) { + type JwtClaims struct { + jwt.RegisteredClaims + } + token, _, err := jwt.NewParser().ParseUnverified(jwtToken, &JwtClaims{}) + if err != nil { + return nil, fmt.Errorf("failed to parse jwt token: %w", err) + } + + if claims, ok := token.Claims.(*JwtClaims); ok { + return &claims.ExpiresAt.Time, nil + } + + return nil, fmt.Errorf("failed to parse token claims to get expire at") +} + +func (c *Client) refreshTokenIfNeeded(ctx context.Context) error { + c.mu.Lock() + defer c.mu.Unlock() + + aboutToExpire := time.Now().Add(60 * time.Second).After(*c.ActionsServiceAdminTokenExpiresAt) + if !aboutToExpire { + return nil + } + + c.logger.Info("Admin token is about to expire, refreshing it", "githubConfigUrl", c.githubConfigURL) + rt, err := c.getRunnerRegistrationToken(ctx, c.githubConfigURL, *c.creds) + if err != nil { + return fmt.Errorf("failed to get runner registration token on fresh: %w", err) + } + + adminConnInfo, err := c.getActionsServiceAdminConnection(ctx, rt, c.githubConfigURL) + if err != nil { + return fmt.Errorf("failed to get actions service admin connection on fresh: %w", err) + } + + c.ActionsServiceURL = adminConnInfo.ActionsServiceUrl + c.ActionsServiceAdminToken = adminConnInfo.AdminToken + c.ActionsServiceAdminTokenExpiresAt, err = actionsServiceAdminTokenExpiresAt(*adminConnInfo.AdminToken) + if err != nil { + return fmt.Errorf("failed to get admin token expire at on refresh: %w", err) + } + + return nil +} diff --git a/github/actions/client_generate_jit_test.go b/github/actions/client_generate_jit_test.go new file mode 100644 index 0000000000..1b1d733047 --- /dev/null +++ b/github/actions/client_generate_jit_test.go @@ -0,0 +1,75 @@ +package actions_test + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" + + 
"github.com/actions/actions-runner-controller/github/actions" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/go-retryablehttp" + "github.com/stretchr/testify/assert" +) + +func TestGenerateJitRunnerConfig(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + + t.Run("Get JIT Config for Runner", func(t *testing.T) { + name := "Get JIT Config for Runner" + want := &actions.RunnerScaleSetJitRunnerConfig{} + response := []byte(`{"count":1,"value":[{"id":1,"name":"scale-set-name"}]}`) + + runnerSettings := &actions.RunnerScaleSetJitRunnerSetting{} + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(response) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + got, err := actionsClient.GenerateJitRunnerConfig(context.Background(), runnerSettings, 1) + if err != nil { + t.Fatalf("GenerateJitRunnerConfig got unexepected error, %v", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("GenerateJitRunnerConfig(%v) mismatch (-want +got):\n%s", name, diff) + } + }) + + t.Run("Default retries on server error", func(t *testing.T) { + runnerSettings := &actions.RunnerScaleSetJitRunnerSetting{} + + retryClient := retryablehttp.NewClient() + retryClient.RetryWaitMax = 1 * time.Millisecond + retryClient.RetryMax = 1 + + actualRetry := 0 + expectedRetry := retryClient.RetryMax + 1 + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + 
ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + _, _ = actionsClient.GenerateJitRunnerConfig(context.Background(), runnerSettings, 1) + + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }) +} diff --git a/github/actions/client_job_acquisition_test.go b/github/actions/client_job_acquisition_test.go new file mode 100644 index 0000000000..b7df3abb57 --- /dev/null +++ b/github/actions/client_job_acquisition_test.go @@ -0,0 +1,144 @@ +package actions_test + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/go-retryablehttp" + "github.com/stretchr/testify/assert" +) + +func TestAcquireJobs(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + + t.Run("Acquire Job", func(t *testing.T) { + name := "Acquire Job" + + want := []int64{1} + response := []byte(`{"value": [1]}`) + + session := &actions.RunnerScaleSetSession{ + RunnerScaleSet: &actions.RunnerScaleSet{Id: 1}, + MessageQueueAccessToken: "abc", + } + requestIDs := want + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(response) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + } + + got, err := actionsClient.AcquireJobs(context.Background(), session.RunnerScaleSet.Id, session.MessageQueueAccessToken, requestIDs) + if err != nil { + t.Fatalf("CreateRunnerScaleSet got unexepected error, %v", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("GetRunnerScaleSet(%v) mismatch (-want +got):\n%s", name, diff) + } + }) + + t.Run("Default retries on server error", func(t *testing.T) { + session := 
&actions.RunnerScaleSetSession{ + RunnerScaleSet: &actions.RunnerScaleSet{Id: 1}, + MessageQueueAccessToken: "abc", + } + var requestIDs []int64 = []int64{1} + + retryClient := retryablehttp.NewClient() + retryClient.RetryWaitMax = 1 * time.Millisecond + retryClient.RetryMax = 1 + + actualRetry := 0 + expectedRetry := retryClient.RetryMax + 1 + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + } + + _, _ = actionsClient.AcquireJobs(context.Background(), session.RunnerScaleSet.Id, session.MessageQueueAccessToken, requestIDs) + + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }) +} + +func TestGetAcquirableJobs(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + + t.Run("Acquire Job", func(t *testing.T) { + name := "Acquire Job" + + want := &actions.AcquirableJobList{} + response := []byte(`{"count": 0}`) + + runnerScaleSet := &actions.RunnerScaleSet{Id: 1} + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(response) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + got, err := actionsClient.GetAcquirableJobs(context.Background(), runnerScaleSet.Id) + if err != nil { + t.Fatalf("GetAcquirableJobs got unexepected error, %v", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("GetAcquirableJobs(%v) mismatch (-want +got):\n%s", name, diff) + } + }) + + t.Run("Default 
retries on server error", func(t *testing.T) { + runnerScaleSet := &actions.RunnerScaleSet{Id: 1} + + retryClient := retryablehttp.NewClient() + retryClient.RetryWaitMax = 1 * time.Millisecond + retryClient.RetryMax = 1 + + actualRetry := 0 + expectedRetry := retryClient.RetryMax + 1 + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + _, _ = actionsClient.GetAcquirableJobs(context.Background(), runnerScaleSet.Id) + + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }) +} diff --git a/github/actions/client_runner_scale_set_message_test.go b/github/actions/client_runner_scale_set_message_test.go new file mode 100644 index 0000000000..2252f5a728 --- /dev/null +++ b/github/actions/client_runner_scale_set_message_test.go @@ -0,0 +1,269 @@ +package actions_test + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/go-retryablehttp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetMessage(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + runnerScaleSetMessage := &actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "rssType", + } + + t.Run("Get Runner Scale Set Message", func(t *testing.T) { + want := runnerScaleSetMessage + response := []byte(`{"messageId":1,"messageType":"rssType"}`) + s 
:= httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(response) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + got, err := actionsClient.GetMessage(context.Background(), s.URL, token, 0) + if err != nil { + t.Fatalf("GetMessage got unexepected error, %v", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("GetMessage mismatch (-want +got):\n%s", diff) + } + }) + + t.Run("Default retries on server error", func(t *testing.T) { + retryClient := retryablehttp.NewClient() + retryClient.RetryWaitMax = 1 * time.Nanosecond + retryClient.RetryMax = 1 + + actualRetry := 0 + expectedRetry := retryClient.RetryMax + 1 + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + _, _ = actionsClient.GetMessage(context.Background(), s.URL, token, 0) + + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }) + + t.Run("Custom retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryMax := 1 + retryWaitMax := 1 * time.Nanosecond + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + _, _ = actionsClient.GetMessage(context.Background(), s.URL, token, 0) + expectedRetry := retryMax + 1 + 
assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("Message token expired", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.GetMessage(context.Background(), s.URL, token, 0) + if err == nil { + t.Fatalf("GetMessage did not get exepected error, ") + } + var expectedErr *actions.MessageQueueTokenExpiredError + require.True(t, errors.As(err, &expectedErr)) + }, + ) + + t.Run("Status code not found", func(t *testing.T) { + want := actions.ActionsError{ + Message: "Request returned status: 404 Not Found", + StatusCode: 404, + } + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.GetMessage(context.Background(), s.URL, token, 0) + if err == nil { + t.Fatalf("GetMessage did not get exepected error, ") + } + if diff := cmp.Diff(want.Error(), err.Error()); diff != "" { + t.Errorf("GetMessage mismatch (-want +got):\n%s", diff) + } + }, + ) + + t.Run("Error when Content-Type is text/plain", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Header().Set("Content-Type", "text/plain") + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := 
actionsClient.GetMessage(context.Background(), s.URL, token, 0) + if err == nil { + t.Fatalf("GetMessage did not get exepected error,") + } + }, + ) +} + +func TestDeleteMessage(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + runnerScaleSetMessage := &actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "rssType", + } + + t.Run("Delete existing message", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + err := actionsClient.DeleteMessage(context.Background(), s.URL, token, runnerScaleSetMessage.MessageId) + if err != nil { + t.Fatalf("DeleteMessage got unexepected error, %v", err) + } + }, + ) + + t.Run("Message token expired", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + err := actionsClient.DeleteMessage(context.Background(), s.URL, token, 0) + if err == nil { + t.Fatalf("DeleteMessage did not get exepected error, ") + } + var expectedErr *actions.MessageQueueTokenExpiredError + require.True(t, errors.As(err, &expectedErr)) + }, + ) + + t.Run("Error when Content-Type is text/plain", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Header().Set("Content-Type", "text/plain") + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: 
&s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + err := actionsClient.DeleteMessage(context.Background(), s.URL, token, runnerScaleSetMessage.MessageId) + if err == nil { + t.Fatalf("DeleteMessage did not get exepected error") + } + var expectedErr *actions.ActionsError + require.True(t, errors.As(err, &expectedErr)) + }, + ) + + t.Run("Default retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryClient := retryablehttp.NewClient() + retryMax := 1 + retryClient.RetryWaitMax = time.Nanosecond + retryClient.RetryMax = retryMax + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _ = actionsClient.DeleteMessage(context.Background(), s.URL, token, runnerScaleSetMessage.MessageId) + expectedRetry := retryMax + 1 + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("No message found", func(t *testing.T) { + want := (*actions.RunnerScaleSetMessage)(nil) + rsl, err := json.Marshal(want) + if err != nil { + t.Fatalf("%v", err) + } + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(rsl) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + err = actionsClient.DeleteMessage(context.Background(), s.URL, token, runnerScaleSetMessage.MessageId+1) + var expectedErr *actions.ActionsError + require.True(t, errors.As(err, &expectedErr)) + }, + ) +} diff --git 
a/github/actions/client_runner_scale_set_session_test.go b/github/actions/client_runner_scale_set_session_test.go new file mode 100644 index 0000000000..e3fc31936a --- /dev/null +++ b/github/actions/client_runner_scale_set_session_test.go @@ -0,0 +1,244 @@ +package actions_test + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func TestCreateMessageSession(t *testing.T) { + t.Run("CreateMessageSession unmarshals correctly", func(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + owner := "foo" + runnerScaleSet := actions.RunnerScaleSet{ + Id: 1, + Name: "ScaleSet", + CreatedOn: time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), + RunnerSetting: actions.RunnerSetting{}, + } + + want := &actions.RunnerScaleSetSession{ + OwnerName: "foo", + RunnerScaleSet: &actions.RunnerScaleSet{ + Id: 1, + Name: "ScaleSet", + }, + MessageQueueUrl: "http://fake.actions.github.com/123", + MessageQueueAccessToken: "fake.jwt.here", + } + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + resp := []byte(`{ + "ownerName": "foo", + "runnerScaleSet": { + "id": 1, + "name": "ScaleSet" + }, + "messageQueueUrl": "http://fake.actions.github.com/123", + "messageQueueAccessToken": "fake.jwt.here" + }`) + w.Write(resp) + })) + defer srv.Close() + + retryMax := 1 + retryWaitMax := 1 * time.Microsecond + + actionsClient := actions.Client{ + ActionsServiceURL: &srv.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + + got, err := actionsClient.CreateMessageSession(context.Background(), runnerScaleSet.Id, owner) + if 
err != nil { + t.Fatalf("CreateMessageSession got unexpected error: %v", err) + } + + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("CreateMessageSession got unexpected diff: -want +got: %v", diff) + } + }) + + t.Run("CreateMessageSession unmarshals errors into ActionsError", func(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + owner := "foo" + runnerScaleSet := actions.RunnerScaleSet{ + Id: 1, + Name: "ScaleSet", + CreatedOn: time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), + RunnerSetting: actions.RunnerSetting{}, + } + + want := &actions.ActionsError{ + ExceptionName: "CSharpExceptionNameHere", + Message: "could not do something", + StatusCode: http.StatusBadRequest, + } + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusBadRequest) + resp := []byte(`{"typeName": "CSharpExceptionNameHere","message": "could not do something"}`) + w.Write(resp) + })) + defer srv.Close() + + retryMax := 1 + retryWaitMax := 1 * time.Microsecond + + actionsClient := actions.Client{ + ActionsServiceURL: &srv.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + + got, err := actionsClient.CreateMessageSession(context.Background(), runnerScaleSet.Id, owner) + if err == nil { + t.Fatalf("CreateMessageSession did not get expected error: %v", got) + } + + errorTypeForComparison := &actions.ActionsError{} + if isActionsError := errors.As(err, &errorTypeForComparison); !isActionsError { + t.Fatalf("CreateMessageSession expected to be able to parse the error into ActionsError type: %v", err) + } + + gotErr := err.(*actions.ActionsError) + + if diff := cmp.Diff(want, gotErr); diff != "" { + t.Fatalf("CreateMessageSession got 
unexpected diff: -want +got: %v", diff) + } + }) + + t.Run("CreateMessageSession call is retried the correct amount of times", func(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + owner := "foo" + runnerScaleSet := actions.RunnerScaleSet{ + Id: 1, + Name: "ScaleSet", + CreatedOn: time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), + RunnerSetting: actions.RunnerSetting{}, + } + + gotRetries := 0 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + gotRetries++ + })) + defer srv.Close() + + retryMax := 3 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + + wantRetries := retryMax + 1 + + actionsClient := actions.Client{ + ActionsServiceURL: &srv.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + + _, _ = actionsClient.CreateMessageSession(context.Background(), runnerScaleSet.Id, owner) + + assert.Equalf(t, gotRetries, wantRetries, "CreateMessageSession got unexpected retry count: got=%v, want=%v", gotRetries, wantRetries) + }) +} + +func TestDeleteMessageSession(t *testing.T) { + t.Run("DeleteMessageSession call is retried the correct amount of times", func(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + runnerScaleSet := actions.RunnerScaleSet{ + Id: 1, + Name: "ScaleSet", + CreatedOn: time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), + RunnerSetting: actions.RunnerSetting{}, + } + + gotRetries := 0 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + gotRetries++ + })) + 
defer srv.Close() + + retryMax := 3 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + + wantRetries := retryMax + 1 + + actionsClient := actions.Client{ + ActionsServiceURL: &srv.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + + sessionId := uuid.New() + + _ = actionsClient.DeleteMessageSession(context.Background(), runnerScaleSet.Id, &sessionId) + + assert.Equalf(t, gotRetries, wantRetries, "CreateMessageSession got unexpected retry count: got=%v, want=%v", gotRetries, wantRetries) + }) +} + +func TestRefreshMessageSession(t *testing.T) { + t.Run("RefreshMessageSession call is retried the correct amount of times", func(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + runnerScaleSet := actions.RunnerScaleSet{ + Id: 1, + Name: "ScaleSet", + CreatedOn: time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), + RunnerSetting: actions.RunnerSetting{}, + } + + gotRetries := 0 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + gotRetries++ + })) + defer srv.Close() + + retryMax := 3 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + + wantRetries := retryMax + 1 + + actionsClient := actions.Client{ + ActionsServiceURL: &srv.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + + sessionId := uuid.New() + + _, _ = actionsClient.RefreshMessageSession(context.Background(), runnerScaleSet.Id, &sessionId) + + assert.Equalf(t, gotRetries, wantRetries, "CreateMessageSession got unexpected retry count: got=%v, want=%v", gotRetries, wantRetries) + }) +} diff --git 
a/github/actions/client_runner_scale_set_test.go b/github/actions/client_runner_scale_set_test.go new file mode 100644 index 0000000000..fb4ba2c532 --- /dev/null +++ b/github/actions/client_runner_scale_set_test.go @@ -0,0 +1,858 @@ +package actions_test + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/go-retryablehttp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetRunnerScaleSet(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + scaleSetName := "ScaleSet" + runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: scaleSetName} + + t.Run("Get existing scale set", func(t *testing.T) { + want := &runnerScaleSet + runnerScaleSetsResp := []byte(`{"count":1,"value":[{"id":1,"name":"ScaleSet"}]}`) + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(runnerScaleSetsResp) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + got, err := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) + if err != nil { + t.Fatalf("CreateRunnerScaleSet got unexepected error, %v", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("GetRunnerScaleSet(%v) mismatch (-want +got):\n%s", scaleSetName, diff) + } + }, + ) + + t.Run("GetRunnerScaleSet calls correct url", func(t *testing.T) { + runnerScaleSetsResp := []byte(`{"count":1,"value":[{"id":1,"name":"ScaleSet"}]}`) + url := url.URL{} + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(runnerScaleSetsResp) 
+ url = *r.URL + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) + if err != nil { + t.Fatalf("CreateRunnerScaleSet got unexepected error, %v", err) + } + + u := url.String() + expectedUrl := fmt.Sprintf("/_apis/runtime/runnerscalesets?name=%s&api-version=6.0-preview", scaleSetName) + assert.Equal(t, expectedUrl, u) + + }, + ) + + t.Run("Status code not found", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) + if err == nil { + t.Fatalf("GetRunnerScaleSet did not get exepected error, ") + } + }, + ) + + t.Run("Error when Content-Type is text/plain", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Header().Set("Content-Type", "text/plain") + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) + if err == nil { + t.Fatalf("GetRunnerScaleSet did not get exepected error,") + } + }, + ) + + t.Run("Default retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryClient := retryablehttp.NewClient() + retryMax := 1 
+ retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + retryClient.RetryWaitMax = retryWaitMax + retryClient.RetryMax = retryMax + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, _ = actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) + expectedRetry := retryMax + 1 + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("Custom retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryMax := 1 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + _, _ = actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) + expectedRetry := retryMax + 1 + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("RunnerScaleSet count is zero", func(t *testing.T) { + want := (*actions.RunnerScaleSet)(nil) + runnerScaleSetsResp := []byte(`{"count":0,"value":[{"id":1,"name":"ScaleSet"}]}`) + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(runnerScaleSetsResp) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + got, _ := 
actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("GetRunnerScaleSet(%v) mismatch (-want +got):\n%s", scaleSetName, diff) + } + + }, + ) + + t.Run("Multiple runner scale sets found", func(t *testing.T) { + wantErr := fmt.Errorf("multiple runner scale sets found with name %s", scaleSetName) + runnerScaleSetsResp := []byte(`{"count":2,"value":[{"id":1,"name":"ScaleSet"}]}`) + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(runnerScaleSetsResp) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) + + if err == nil { + t.Fatalf("GetRunnerScaleSet did not get exepected error, %v", wantErr) + } + + if diff := cmp.Diff(wantErr.Error(), err.Error()); diff != "" { + t.Errorf("GetRunnerScaleSet(%v) mismatch (-want +got):\n%s", scaleSetName, diff) + } + + }, + ) +} + +func TestGetRunnerScaleSetById(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + scaleSetCreationDateTime := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) + runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: "ScaleSet", CreatedOn: scaleSetCreationDateTime, RunnerSetting: actions.RunnerSetting{}} + + t.Run("Get existing scale set by Id", func(t *testing.T) { + want := &runnerScaleSet + rsl, err := json.Marshal(want) + if err != nil { + t.Fatalf("%v", err) + } + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(rsl) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + 
} + got, err := actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) + if err != nil { + t.Fatalf("GetRunnerScaleSetById got unexepected error, %v", err) + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("GetRunnerScaleSetById(%d) mismatch (-want +got):\n%s", runnerScaleSet.Id, diff) + } + }, + ) + + t.Run("GetRunnerScaleSetById calls correct url", func(t *testing.T) { + rsl, err := json.Marshal(&runnerScaleSet) + if err != nil { + t.Fatalf("%v", err) + } + url := url.URL{} + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(rsl) + url = *r.URL + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err = actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) + if err != nil { + t.Fatalf("GetRunnerScaleSetById got unexepected error, %v", err) + } + + u := url.String() + expectedUrl := fmt.Sprintf("/_apis/runtime/runnerscalesets/%d?api-version=6.0-preview", runnerScaleSet.Id) + assert.Equal(t, expectedUrl, u) + + }, + ) + + t.Run("Status code not found", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) + if err == nil { + t.Fatalf("GetRunnerScaleSetById did not get exepected error, ") + } + }, + ) + + t.Run("Error when Content-Type is text/plain", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Header().Set("Content-Type", "text/plain") + })) + defer s.Close() + 
actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) + if err == nil { + t.Fatalf("GetRunnerScaleSetById did not get exepected error,") + } + }, + ) + + t.Run("Default retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryClient := retryablehttp.NewClient() + retryMax := 1 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + retryClient.RetryWaitMax = retryWaitMax + retryClient.RetryMax = retryMax + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, _ = actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) + expectedRetry := retryMax + 1 + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("Custom retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryMax := 1 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + _, _ = actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) + expectedRetry := retryMax + 1 
+ assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("No RunnerScaleSet found", func(t *testing.T) { + want := (*actions.RunnerScaleSet)(nil) + rsl, err := json.Marshal(want) + if err != nil { + t.Fatalf("%v", err) + } + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(rsl) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + got, _ := actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("GetRunnerScaleSetById(%v) mismatch (-want +got):\n%s", runnerScaleSet.Id, diff) + } + + }, + ) +} + +func TestCreateRunnerScaleSet(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + scaleSetCreationDateTime := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) + runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: "ScaleSet", CreatedOn: scaleSetCreationDateTime, RunnerSetting: actions.RunnerSetting{}} + + t.Run("Create runner scale set", func(t *testing.T) { + want := &runnerScaleSet + rsl, err := json.Marshal(want) + if err != nil { + t.Fatalf("%v", err) + } + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(rsl) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + got, err := actionsClient.CreateRunnerScaleSet(context.Background(), &runnerScaleSet) + if err != nil { + t.Fatalf("CreateRunnerScaleSet got exepected error, %v", err) + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("CreateRunnerScaleSet(%d) mismatch 
(-want +got):\n%s", runnerScaleSet.Id, diff) + } + }, + ) + + t.Run("CreateRunnerScaleSet calls correct url", func(t *testing.T) { + rsl, err := json.Marshal(&runnerScaleSet) + if err != nil { + t.Fatalf("%v", err) + } + url := url.URL{} + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(rsl) + url = *r.URL + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err = actionsClient.CreateRunnerScaleSet(context.Background(), &runnerScaleSet) + if err != nil { + t.Fatalf("CreateRunnerScaleSet got unexepected error, %v", err) + } + + u := url.String() + expectedUrl := "/_apis/runtime/runnerscalesets?api-version=6.0-preview" + assert.Equal(t, expectedUrl, u) + + }, + ) + + t.Run("Error when Content-Type is text/plain", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Header().Set("Content-Type", "text/plain") + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.CreateRunnerScaleSet(context.Background(), &runnerScaleSet) + if err == nil { + t.Fatalf("CreateRunnerScaleSet did not get exepected error, %v", &actions.ActionsError{}) + } + var expectedErr *actions.ActionsError + require.True(t, errors.As(err, &expectedErr)) + }, + ) + + t.Run("Default retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryClient := retryablehttp.NewClient() + retryMax := 1 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + 
retryClient.RetryMax = retryMax + retryClient.RetryWaitMax = retryWaitMax + + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, _ = actionsClient.CreateRunnerScaleSet(context.Background(), &runnerScaleSet) + expectedRetry := retryMax + 1 + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("Custom retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryMax := 1 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + _, _ = actionsClient.CreateRunnerScaleSet(context.Background(), &runnerScaleSet) + expectedRetry := retryMax + 1 + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) +} + +func TestUpdateRunnerScaleSet(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + scaleSetCreationDateTime := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) + runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: "ScaleSet", CreatedOn: scaleSetCreationDateTime, RunnerSetting: actions.RunnerSetting{}} + + t.Run("Update existing scale set", func(t *testing.T) { + want := &runnerScaleSet + rsl, err := json.Marshal(want) + if err != nil { + t.Fatalf("%v", err) + } + s := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(rsl) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + got, err := actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, want) + if err != nil { + t.Fatalf("UpdateRunnerScaleSet got exepected error, %v", err) + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("UpdateRunnerScaleSet(%d) mismatch (-want +got):\n%s", runnerScaleSet.Id, diff) + } + }, + ) + + t.Run("UpdateRunnerScaleSet calls correct url", func(t *testing.T) { + rsl, err := json.Marshal(&runnerScaleSet) + if err != nil { + t.Fatalf("%v", err) + } + url := url.URL{} + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(rsl) + url = *r.URL + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err = actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) + if err != nil { + t.Fatalf("UpdateRunnerScaleSet got unexepected error, %v", err) + } + + u := url.String() + expectedUrl := fmt.Sprintf("/_apis/runtime/runnerscalesets/%d?api-version=6.0-preview", runnerScaleSet.Id) + assert.Equal(t, expectedUrl, u) + + }, + ) + + t.Run("Status code not found", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) + if err == nil { + t.Fatalf("UpdateRunnerScaleSet did not get 
exepected error,") + } + var expectedErr *actions.ActionsError + require.True(t, errors.As(err, &expectedErr)) + }, + ) + + t.Run("Error when Content-Type is text/plain", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Header().Set("Content-Type", "text/plain") + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, err := actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) + if err == nil { + t.Fatalf("UpdateRunnerScaleSet did not get exepected error") + } + var expectedErr *actions.ActionsError + require.True(t, errors.As(err, &expectedErr)) + }, + ) + + t.Run("Default retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryClient := retryablehttp.NewClient() + retryMax := 1 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + retryClient.RetryWaitMax = retryWaitMax + retryClient.RetryMax = retryMax + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _, _ = actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) + expectedRetry := retryMax + 1 + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("Custom retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + 
w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryMax := 1 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + _, _ = actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) + expectedRetry := retryMax + 1 + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("No RunnerScaleSet found", func(t *testing.T) { + want := (*actions.RunnerScaleSet)(nil) + rsl, err := json.Marshal(want) + if err != nil { + t.Fatalf("%v", err) + } + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(rsl) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + got, err := actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) + if err != nil { + t.Fatalf("UpdateRunnerScaleSet got unexepected error, %v", err) + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("UpdateRunnerScaleSet(%v) mismatch (-want +got):\n%s", runnerScaleSet.Id, diff) + } + + }, + ) +} + +func TestDeleteRunnerScaleSet(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + scaleSetCreationDateTime := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) + runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: "ScaleSet", CreatedOn: scaleSetCreationDateTime, RunnerSetting: actions.RunnerSetting{}} + + t.Run("Delete existing scale set", func(t *testing.T) { + s := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + err := actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) + if err != nil { + t.Fatalf("DeleteRunnerScaleSet got unexepected error, %v", err) + } + }, + ) + + t.Run("DeleteRunnerScaleSet calls correct url", func(t *testing.T) { + url := url.URL{} + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + url = *r.URL + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + err := actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) + if err != nil { + t.Fatalf("DeleteRunnerScaleSet got unexepected error, %v", err) + } + + u := url.String() + expectedUrl := fmt.Sprintf("/_apis/runtime/runnerscalesets/%d?api-version=6.0-preview", runnerScaleSet.Id) + assert.Equal(t, expectedUrl, u) + + }, + ) + + t.Run("Status code not found", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + err := actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) + if err == nil { + t.Fatalf("DeleteRunnerScaleSet did not get exepected error, ") + } + var expectedErr *actions.ActionsError + require.True(t, errors.As(err, &expectedErr)) + }, + ) + + t.Run("Error when Content-Type is text/plain", func(t *testing.T) { + s := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Header().Set("Content-Type", "text/plain") + })) + defer s.Close() + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + err := actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) + if err == nil { + t.Fatalf("DeleteRunnerScaleSet did not get exepected error") + } + var expectedErr *actions.ActionsError + require.True(t, errors.As(err, &expectedErr)) + }, + ) + + t.Run("Default retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryClient := retryablehttp.NewClient() + retryMax := 1 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + retryClient.RetryWaitMax = retryWaitMax + retryClient.RetryMax = retryMax + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + _ = actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) + expectedRetry := retryMax + 1 + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("Custom retries on server error", func(t *testing.T) { + actualRetry := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + retryMax := 1 + retryWaitMax, err := time.ParseDuration("1µs") + if err != nil { + t.Fatalf("%v", err) + } + actionsClient := actions.Client{ + ActionsServiceURL: 
&s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + RetryMax: &retryMax, + RetryWaitMax: &retryWaitMax, + } + _ = actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) + expectedRetry := retryMax + 1 + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }, + ) + + t.Run("No RunnerScaleSet found", func(t *testing.T) { + want := (*actions.RunnerScaleSet)(nil) + rsl, err := json.Marshal(want) + if err != nil { + t.Fatalf("%v", err) + } + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(rsl) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + err = actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) + var expectedErr *actions.ActionsError + require.True(t, errors.As(err, &expectedErr)) + }, + ) +} diff --git a/github/actions/client_runner_test.go b/github/actions/client_runner_test.go new file mode 100644 index 0000000000..a8184b57b4 --- /dev/null +++ b/github/actions/client_runner_test.go @@ -0,0 +1,219 @@ +package actions_test + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/go-retryablehttp" + "github.com/stretchr/testify/assert" +) + +var tokenExpireAt = time.Now().Add(10 * time.Minute) + +func TestGetRunner(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + + t.Run("Get Runner", func(t *testing.T) { + name := "Get Runner" + var runnerID int64 = 1 + want := &actions.RunnerReference{ + Id: int(runnerID), + Name: "self-hosted-ubuntu", + } + 
response := []byte(`{"id": 1, "name": "self-hosted-ubuntu"}`) + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(response) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + got, err := actionsClient.GetRunner(context.Background(), runnerID) + if err != nil { + t.Fatalf("GetRunner got unexepected error, %v", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("GetRunner(%v) mismatch (-want +got):\n%s", name, diff) + } + }) + + t.Run("Default retries on server error", func(t *testing.T) { + var runnerID int64 = 1 + retryClient := retryablehttp.NewClient() + retryClient.RetryWaitMax = 1 * time.Millisecond + retryClient.RetryMax = 1 + + actualRetry := 0 + expectedRetry := retryClient.RetryMax + 1 + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + + httpClient := retryClient.StandardClient() + + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + _, _ = actionsClient.GetRunner(context.Background(), runnerID) + + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }) +} + +func TestGetRunnerByName(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + + t.Run("Get Runner by Name", func(t *testing.T) { + var runnerID int64 = 1 + var runnerName string = "self-hosted-ubuntu" + want := &actions.RunnerReference{ + Id: int(runnerID), + Name: runnerName, + } + response := []byte(`{"count": 1, "value": [{"id": 1, "name": 
"self-hosted-ubuntu"}]}`) + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(response) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + got, err := actionsClient.GetRunnerByName(context.Background(), runnerName) + if err != nil { + t.Fatalf("GetRunnerByName got unexepected error, %v", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("GetRunnerByName(%v) mismatch (-want +got):\n%s", runnerName, diff) + } + }) + + t.Run("Get Runner by name with not exist runner", func(t *testing.T) { + var runnerName string = "self-hosted-ubuntu" + response := []byte(`{"count": 0, "value": []}`) + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(response) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + got, err := actionsClient.GetRunnerByName(context.Background(), runnerName) + if err != nil { + t.Fatalf("GetRunnerByName got unexepected error, %v", err) + } + + if diff := cmp.Diff((*actions.RunnerReference)(nil), got); diff != "" { + t.Errorf("GetRunnerByName(%v) mismatch (-want +got):\n%s", runnerName, diff) + } + }) + + t.Run("Default retries on server error", func(t *testing.T) { + var runnerName string = "self-hosted-ubuntu" + retryClient := retryablehttp.NewClient() + retryClient.RetryWaitMax = 1 * time.Millisecond + retryClient.RetryMax = 1 + + actualRetry := 0 + expectedRetry := retryClient.RetryMax + 1 + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + + httpClient := retryClient.StandardClient() + + actionsClient := actions.Client{ + Client: httpClient, + 
ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + _, _ = actionsClient.GetRunnerByName(context.Background(), runnerName) + + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }) +} + +func TestDeleteRunner(t *testing.T) { + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + + t.Run("Delete Runner", func(t *testing.T) { + var runnerID int64 = 1 + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + defer s.Close() + + actionsClient := actions.Client{ + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + if err := actionsClient.RemoveRunner(context.Background(), runnerID); err != nil { + t.Fatalf("RemoveRunner got unexepected error, %v", err) + } + }) + + t.Run("Default retries on server error", func(t *testing.T) { + var runnerID int64 = 1 + + retryClient := retryablehttp.NewClient() + retryClient.RetryWaitMax = 1 * time.Millisecond + retryClient.RetryMax = 1 + + actualRetry := 0 + expectedRetry := retryClient.RetryMax + 1 + + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + actualRetry++ + })) + defer s.Close() + + httpClient := retryClient.StandardClient() + actionsClient := actions.Client{ + Client: httpClient, + ActionsServiceURL: &s.URL, + ActionsServiceAdminToken: &token, + ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, + } + + _ = actionsClient.RemoveRunner(context.Background(), runnerID) + + assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) + }) +} diff --git a/github/actions/errors.go 
b/github/actions/errors.go new file mode 100644 index 0000000000..7f483a0aae --- /dev/null +++ b/github/actions/errors.go @@ -0,0 +1,71 @@ +package actions + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" +) + +type ActionsError struct { + ExceptionName string `json:"typeName,omitempty"` + Message string `json:"message,omitempty"` + StatusCode int +} + +func (e *ActionsError) Error() string { + return fmt.Sprintf("%v - had issue communicating with Actions backend: %v", e.StatusCode, e.Message) +} + +func ParseActionsErrorFromResponse(response *http.Response) error { + if response.ContentLength == 0 { + message := "Request returned status: " + response.Status + return &ActionsError{ + ExceptionName: "unknown", + Message: message, + StatusCode: response.StatusCode, + } + } + + defer response.Body.Close() + body, err := io.ReadAll(response.Body) + if err != nil { + return err + } + + body = trimByteOrderMark(body) + contentType, ok := response.Header["Content-Type"] + if ok && len(contentType) > 0 && strings.Contains(contentType[0], "text/plain") { + message := string(body) + statusCode := response.StatusCode + return &ActionsError{ + Message: message, + StatusCode: statusCode, + } + } + + actionsError := &ActionsError{StatusCode: response.StatusCode} + if err := json.Unmarshal(body, &actionsError); err != nil { + return err + } + + return actionsError +} + +type MessageQueueTokenExpiredError struct { + msg string +} + +func (e *MessageQueueTokenExpiredError) Error() string { + return e.msg +} + +type HttpClientSideError struct { + msg string + Code int +} + +func (e *HttpClientSideError) Error() string { + return e.msg +} diff --git a/github/actions/fake/client.go b/github/actions/fake/client.go new file mode 100644 index 0000000000..fc2b75fb6c --- /dev/null +++ b/github/actions/fake/client.go @@ -0,0 +1,235 @@ +package fake + +import ( + "context" + "time" + + "github.com/actions/actions-runner-controller/github/actions" + 
"github.com/google/uuid" +) + +type Option func(*FakeClient) + +func WithGetRunnerScaleSetResult(scaleSet *actions.RunnerScaleSet, err error) Option { + return func(f *FakeClient) { + f.getRunnerScaleSetResult.RunnerScaleSet = scaleSet + f.getRunnerScaleSetResult.err = err + } +} + +func WithGetRunner(runner *actions.RunnerReference, err error) Option { + return func(f *FakeClient) { + f.getRunnerResult.RunnerReference = runner + f.getRunnerResult.err = err + } +} + +var defaultRunnerScaleSet = &actions.RunnerScaleSet{ + Id: 1, + Name: "testset", + RunnerGroupId: 1, + RunnerGroupName: "testgroup", + Labels: []actions.Label{{Type: "test", Name: "test"}}, + RunnerSetting: actions.RunnerSetting{}, + CreatedOn: time.Now(), + RunnerJitConfigUrl: "test.test.test", + Statistics: nil, +} + +var defaultRunnerGroup = &actions.RunnerGroup{ + ID: 1, + Name: "testgroup", + Size: 1, + IsDefault: true, +} + +var sessionID = uuid.New() + +var defaultRunnerScaleSetSession = &actions.RunnerScaleSetSession{ + SessionId: &sessionID, + OwnerName: "testowner", + RunnerScaleSet: defaultRunnerScaleSet, + MessageQueueUrl: "https://test.url/path", + MessageQueueAccessToken: "faketoken", + Statistics: nil, +} + +var defaultAcquirableJob = &actions.AcquirableJob{ + AcquireJobUrl: "https://test.url", + MessageType: "", + RunnerRequestId: 1, + RepositoryName: "testrepo", + OwnerName: "testowner", + JobWorkflowRef: "workflowref", + EventName: "testevent", + RequestLabels: []string{"test"}, +} + +var defaultAcquirableJobList = &actions.AcquirableJobList{ + Count: 1, + Jobs: []actions.AcquirableJob{*defaultAcquirableJob}, +} + +var defaultRunnerReference = &actions.RunnerReference{ + Id: 1, + Name: "testrunner", + RunnerScaleSetId: 1, +} + +var defaultRunnerScaleSetMessage = &actions.RunnerScaleSetMessage{ + MessageId: 1, + MessageType: "test", + Body: "{}", + Statistics: nil, +} + +var defaultRunnerScaleSetJitRunnerConfig = &actions.RunnerScaleSetJitRunnerConfig{ + Runner: defaultRunnerReference, 
+ EncodedJITConfig: "test", +} + +// FakeClient implements actions service +type FakeClient struct { + getRunnerScaleSetResult struct { + *actions.RunnerScaleSet + err error + } + getRunnerScaleSetByIdResult struct { + *actions.RunnerScaleSet + err error + } + getRunnerGroupByNameResult struct { + *actions.RunnerGroup + err error + } + + createRunnerScaleSetResult struct { + *actions.RunnerScaleSet + err error + } + createMessageSessionResult struct { + *actions.RunnerScaleSetSession + err error + } + deleteMessageSessionResult struct { + err error + } + refreshMessageSessionResult struct { + *actions.RunnerScaleSetSession + err error + } + acquireJobsResult struct { + ids []int64 + err error + } + getAcquirableJobsResult struct { + *actions.AcquirableJobList + err error + } + getMessageResult struct { + *actions.RunnerScaleSetMessage + err error + } + deleteMessageResult struct { + err error + } + generateJitRunnerConfigResult struct { + *actions.RunnerScaleSetJitRunnerConfig + err error + } + getRunnerResult struct { + *actions.RunnerReference + err error + } + getRunnerByNameResult struct { + *actions.RunnerReference + err error + } + removeRunnerResult struct { + err error + } +} + +func NewFakeClient(options ...Option) actions.ActionsService { + f := &FakeClient{} + f.applyDefaults() + for _, opt := range options { + opt(f) + } + return f +} + +func (f *FakeClient) applyDefaults() { + f.getRunnerScaleSetResult.RunnerScaleSet = defaultRunnerScaleSet + f.getRunnerScaleSetByIdResult.RunnerScaleSet = defaultRunnerScaleSet + f.getRunnerGroupByNameResult.RunnerGroup = defaultRunnerGroup + f.createRunnerScaleSetResult.RunnerScaleSet = defaultRunnerScaleSet + f.createMessageSessionResult.RunnerScaleSetSession = defaultRunnerScaleSetSession + f.refreshMessageSessionResult.RunnerScaleSetSession = defaultRunnerScaleSetSession + f.acquireJobsResult.ids = []int64{1} + f.getAcquirableJobsResult.AcquirableJobList = defaultAcquirableJobList + 
f.getMessageResult.RunnerScaleSetMessage = defaultRunnerScaleSetMessage + f.generateJitRunnerConfigResult.RunnerScaleSetJitRunnerConfig = defaultRunnerScaleSetJitRunnerConfig + f.getRunnerResult.RunnerReference = defaultRunnerReference + f.getRunnerByNameResult.RunnerReference = defaultRunnerReference +} + +func (f *FakeClient) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*actions.RunnerScaleSet, error) { + return f.getRunnerScaleSetResult.RunnerScaleSet, f.getRunnerScaleSetResult.err +} + +func (f *FakeClient) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int) (*actions.RunnerScaleSet, error) { + return f.getRunnerScaleSetByIdResult.RunnerScaleSet, f.getRunnerScaleSetResult.err +} + +func (f *FakeClient) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*actions.RunnerGroup, error) { + return f.getRunnerGroupByNameResult.RunnerGroup, f.getRunnerGroupByNameResult.err +} + +func (f *FakeClient) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *actions.RunnerScaleSet) (*actions.RunnerScaleSet, error) { + return f.createRunnerScaleSetResult.RunnerScaleSet, f.createRunnerScaleSetResult.err +} + +func (f *FakeClient) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) { + return f.createMessageSessionResult.RunnerScaleSetSession, f.createMessageSessionResult.err +} + +func (f *FakeClient) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error { + return f.deleteMessageSessionResult.err +} + +func (f *FakeClient) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) { + return f.refreshMessageSessionResult.RunnerScaleSetSession, f.refreshMessageSessionResult.err +} + +func (f *FakeClient) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) { + return 
f.acquireJobsResult.ids, f.acquireJobsResult.err +} + +func (f *FakeClient) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) { + return f.getAcquirableJobsResult.AcquirableJobList, f.getAcquirableJobsResult.err +} + +func (f *FakeClient) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) { + return f.getMessageResult.RunnerScaleSetMessage, f.getMessageResult.err +} + +func (f *FakeClient) DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error { + return f.deleteMessageResult.err +} + +func (f *FakeClient) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *actions.RunnerScaleSetJitRunnerSetting, scaleSetId int) (*actions.RunnerScaleSetJitRunnerConfig, error) { + return f.generateJitRunnerConfigResult.RunnerScaleSetJitRunnerConfig, f.generateJitRunnerConfigResult.err +} + +func (f *FakeClient) GetRunner(ctx context.Context, runnerId int64) (*actions.RunnerReference, error) { + return f.getRunnerResult.RunnerReference, f.getRunnerResult.err +} + +func (f *FakeClient) GetRunnerByName(ctx context.Context, runnerName string) (*actions.RunnerReference, error) { + return f.getRunnerByNameResult.RunnerReference, f.getRunnerByNameResult.err +} + +func (f *FakeClient) RemoveRunner(ctx context.Context, runnerId int64) error { + return f.removeRunnerResult.err +} diff --git a/github/actions/fake/multi_client.go b/github/actions/fake/multi_client.go new file mode 100644 index 0000000000..95c0c6fcdf --- /dev/null +++ b/github/actions/fake/multi_client.go @@ -0,0 +1,43 @@ +package fake + +import ( + "context" + + "github.com/actions/actions-runner-controller/github/actions" +) + +type MultiClientOption func(*fakeMultiClient) + +func WithDefaultClient(client actions.ActionsService, err error) MultiClientOption { + return func(f *fakeMultiClient) { + f.defaultClient = client + 
f.defaultErr = err + } +} + +type fakeMultiClient struct { + defaultClient actions.ActionsService + defaultErr error +} + +func NewMultiClient(opts ...MultiClientOption) actions.MultiClient { + f := &fakeMultiClient{} + + for _, opt := range opts { + opt(f) + } + + if f.defaultClient == nil { + f.defaultClient = NewFakeClient() + } + + return f +} + +func (f *fakeMultiClient) GetClientFor(ctx context.Context, githubConfigURL string, creds actions.ActionsAuth, namespace string) (actions.ActionsService, error) { + return f.defaultClient, f.defaultErr +} + +func (f *fakeMultiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData actions.KubernetesSecretData) (actions.ActionsService, error) { + return f.defaultClient, f.defaultErr +} diff --git a/github/actions/mock_ActionsService.go b/github/actions/mock_ActionsService.go new file mode 100644 index 0000000000..341dc5133b --- /dev/null +++ b/github/actions/mock_ActionsService.go @@ -0,0 +1,348 @@ +// Code generated by mockery v2.16.0. DO NOT EDIT. 
+ +package actions + +import ( + context "context" + + uuid "github.com/google/uuid" + mock "github.com/stretchr/testify/mock" +) + +// MockActionsService is an autogenerated mock type for the ActionsService type +type MockActionsService struct { + mock.Mock +} + +// AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds +func (_m *MockActionsService) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) { + ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds) + + var r0 []int64 + if rf, ok := ret.Get(0).(func(context.Context, int, string, []int64) []int64); ok { + r0 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int64) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, string, []int64) error); ok { + r1 = rf(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner +func (_m *MockActionsService) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*RunnerScaleSetSession, error) { + ret := _m.Called(ctx, runnerScaleSetId, owner) + + var r0 *RunnerScaleSetSession + if rf, ok := ret.Get(0).(func(context.Context, int, string) *RunnerScaleSetSession); ok { + r0 = rf(ctx, runnerScaleSetId, owner) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerScaleSetSession) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok { + r1 = rf(ctx, runnerScaleSetId, owner) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateRunnerScaleSet provides a mock function with given fields: ctx, runnerScaleSet +func (_m *MockActionsService) CreateRunnerScaleSet(ctx context.Context, 
runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) { + ret := _m.Called(ctx, runnerScaleSet) + + var r0 *RunnerScaleSet + if rf, ok := ret.Get(0).(func(context.Context, *RunnerScaleSet) *RunnerScaleSet); ok { + r0 = rf(ctx, runnerScaleSet) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerScaleSet) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *RunnerScaleSet) error); ok { + r1 = rf(ctx, runnerScaleSet) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId +func (_m *MockActionsService) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error { + ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) error); ok { + r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, messageId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId +func (_m *MockActionsService) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error { + ret := _m.Called(ctx, runnerScaleSetId, sessionId) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) error); ok { + r0 = rf(ctx, runnerScaleSetId, sessionId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GenerateJitRunnerConfig provides a mock function with given fields: ctx, jitRunnerSetting, scaleSetId +func (_m *MockActionsService) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *RunnerScaleSetJitRunnerSetting, scaleSetId int) (*RunnerScaleSetJitRunnerConfig, error) { + ret := _m.Called(ctx, jitRunnerSetting, scaleSetId) + + var r0 *RunnerScaleSetJitRunnerConfig + if rf, ok := ret.Get(0).(func(context.Context, 
*RunnerScaleSetJitRunnerSetting, int) *RunnerScaleSetJitRunnerConfig); ok { + r0 = rf(ctx, jitRunnerSetting, scaleSetId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerScaleSetJitRunnerConfig) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *RunnerScaleSetJitRunnerSetting, int) error); ok { + r1 = rf(ctx, jitRunnerSetting, scaleSetId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId +func (_m *MockActionsService) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*AcquirableJobList, error) { + ret := _m.Called(ctx, runnerScaleSetId) + + var r0 *AcquirableJobList + if rf, ok := ret.Get(0).(func(context.Context, int) *AcquirableJobList); ok { + r0 = rf(ctx, runnerScaleSetId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*AcquirableJobList) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, runnerScaleSetId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId +func (_m *MockActionsService) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64) (*RunnerScaleSetMessage, error) { + ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) + + var r0 *RunnerScaleSetMessage + if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *RunnerScaleSetMessage); ok { + r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerScaleSetMessage) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok { + r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
GetRunner provides a mock function with given fields: ctx, runnerId +func (_m *MockActionsService) GetRunner(ctx context.Context, runnerId int64) (*RunnerReference, error) { + ret := _m.Called(ctx, runnerId) + + var r0 *RunnerReference + if rf, ok := ret.Get(0).(func(context.Context, int64) *RunnerReference); ok { + r0 = rf(ctx, runnerId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerReference) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok { + r1 = rf(ctx, runnerId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRunnerByName provides a mock function with given fields: ctx, runnerName +func (_m *MockActionsService) GetRunnerByName(ctx context.Context, runnerName string) (*RunnerReference, error) { + ret := _m.Called(ctx, runnerName) + + var r0 *RunnerReference + if rf, ok := ret.Get(0).(func(context.Context, string) *RunnerReference); ok { + r0 = rf(ctx, runnerName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerReference) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, runnerName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRunnerGroupByName provides a mock function with given fields: ctx, runnerGroup +func (_m *MockActionsService) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*RunnerGroup, error) { + ret := _m.Called(ctx, runnerGroup) + + var r0 *RunnerGroup + if rf, ok := ret.Get(0).(func(context.Context, string) *RunnerGroup); ok { + r0 = rf(ctx, runnerGroup) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerGroup) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, runnerGroup) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRunnerScaleSet provides a mock function with given fields: ctx, runnerScaleSetName +func (_m *MockActionsService) GetRunnerScaleSet(ctx context.Context, 
runnerScaleSetName string) (*RunnerScaleSet, error) { + ret := _m.Called(ctx, runnerScaleSetName) + + var r0 *RunnerScaleSet + if rf, ok := ret.Get(0).(func(context.Context, string) *RunnerScaleSet); ok { + r0 = rf(ctx, runnerScaleSetName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerScaleSet) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, runnerScaleSetName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRunnerScaleSetById provides a mock function with given fields: ctx, runnerScaleSetId +func (_m *MockActionsService) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int) (*RunnerScaleSet, error) { + ret := _m.Called(ctx, runnerScaleSetId) + + var r0 *RunnerScaleSet + if rf, ok := ret.Get(0).(func(context.Context, int) *RunnerScaleSet); ok { + r0 = rf(ctx, runnerScaleSetId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerScaleSet) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, runnerScaleSetId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId +func (_m *MockActionsService) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*RunnerScaleSetSession, error) { + ret := _m.Called(ctx, runnerScaleSetId, sessionId) + + var r0 *RunnerScaleSetSession + if rf, ok := ret.Get(0).(func(context.Context, int, *uuid.UUID) *RunnerScaleSetSession); ok { + r0 = rf(ctx, runnerScaleSetId, sessionId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerScaleSetSession) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, *uuid.UUID) error); ok { + r1 = rf(ctx, runnerScaleSetId, sessionId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoveRunner provides a mock function with given fields: ctx, runnerId +func 
(_m *MockActionsService) RemoveRunner(ctx context.Context, runnerId int64) error { + ret := _m.Called(ctx, runnerId) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { + r0 = rf(ctx, runnerId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type mockConstructorTestingTNewMockActionsService interface { + mock.TestingT + Cleanup(func()) +} + +// NewMockActionsService creates a new instance of MockActionsService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewMockActionsService(t mockConstructorTestingTNewMockActionsService) *MockActionsService { + mock := &MockActionsService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/github/actions/mock_SessionService.go b/github/actions/mock_SessionService.go new file mode 100644 index 0000000000..2706da7b53 --- /dev/null +++ b/github/actions/mock_SessionService.go @@ -0,0 +1,103 @@ +// Code generated by mockery v2.16.0. DO NOT EDIT. 
+ +package actions + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// MockSessionService is an autogenerated mock type for the SessionService type +type MockSessionService struct { + mock.Mock +} + +// AcquireJobs provides a mock function with given fields: ctx, requestIds +func (_m *MockSessionService) AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) { + ret := _m.Called(ctx, requestIds) + + var r0 []int64 + if rf, ok := ret.Get(0).(func(context.Context, []int64) []int64); ok { + r0 = rf(ctx, requestIds) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int64) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []int64) error); ok { + r1 = rf(ctx, requestIds) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Close provides a mock function with given fields: +func (_m *MockSessionService) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteMessage provides a mock function with given fields: ctx, messageId +func (_m *MockSessionService) DeleteMessage(ctx context.Context, messageId int64) error { + ret := _m.Called(ctx, messageId) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { + r0 = rf(ctx, messageId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetMessage provides a mock function with given fields: ctx, lastMessageId +func (_m *MockSessionService) GetMessage(ctx context.Context, lastMessageId int64) (*RunnerScaleSetMessage, error) { + ret := _m.Called(ctx, lastMessageId) + + var r0 *RunnerScaleSetMessage + if rf, ok := ret.Get(0).(func(context.Context, int64) *RunnerScaleSetMessage); ok { + r0 = rf(ctx, lastMessageId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerScaleSetMessage) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok { + r1 = 
rf(ctx, lastMessageId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewMockSessionService interface { + mock.TestingT + Cleanup(func()) +} + +// NewMockSessionService creates a new instance of MockSessionService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewMockSessionService(t mockConstructorTestingTNewMockSessionService) *MockSessionService { + mock := &MockSessionService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/github/actions/multi_client.go b/github/actions/multi_client.go new file mode 100644 index 0000000000..1d1aad1dfa --- /dev/null +++ b/github/actions/multi_client.go @@ -0,0 +1,164 @@ +package actions + +import ( + "context" + "fmt" + "net/url" + "strconv" + "sync" + + "github.com/go-logr/logr" +) + +type MultiClient interface { + GetClientFor(ctx context.Context, githubConfigURL string, creds ActionsAuth, namespace string) (ActionsService, error) + GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData KubernetesSecretData) (ActionsService, error) +} + +type multiClient struct { + // To lock adding and removing of individual clients. + mu sync.Mutex + clients map[ActionsClientKey]*actionsClientWrapper + + logger logr.Logger + userAgent string +} + +type GitHubAppAuth struct { + AppID int64 + AppInstallationID int64 + AppPrivateKey string +} + +type ActionsAuth struct { + // GitHub App + AppCreds *GitHubAppAuth + + // GitHub PAT + Token string +} + +type ActionsClientKey struct { + ActionsURL string + Auth ActionsAuth + Namespace string +} + +type actionsClientWrapper struct { + // To lock client usage when tokens are being refreshed. 
+ mu sync.Mutex + + client ActionsService +} + +func NewMultiClient(userAgent string, logger logr.Logger) MultiClient { + return &multiClient{ + mu: sync.Mutex{}, + clients: make(map[ActionsClientKey]*actionsClientWrapper), + logger: logger, + userAgent: userAgent, + } +} + +func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, creds ActionsAuth, namespace string) (ActionsService, error) { + m.logger.Info("retrieve actions client", "githubConfigURL", githubConfigURL, "namespace", namespace) + + parsedGitHubURL, err := url.Parse(githubConfigURL) + if err != nil { + return nil, err + } + + if creds.Token == "" && creds.AppCreds == nil { + return nil, fmt.Errorf("no credentials provided. either a PAT or GitHub App credentials should be provided") + } + + if creds.Token != "" && creds.AppCreds != nil { + return nil, fmt.Errorf("both PAT and GitHub App credentials provided. should only provide one") + } + + key := ActionsClientKey{ + ActionsURL: parsedGitHubURL.String(), + Namespace: namespace, + } + + if creds.AppCreds != nil { + key.Auth = ActionsAuth{ + AppCreds: creds.AppCreds, + } + } + + if creds.Token != "" { + key.Auth = ActionsAuth{ + Token: creds.Token, + } + } + + m.mu.Lock() + defer m.mu.Unlock() + + clientWrapper, has := m.clients[key] + if has { + m.logger.Info("using cache client", "githubConfigURL", githubConfigURL, "namespace", namespace) + return clientWrapper.client, nil + } + + m.logger.Info("creating new client", "githubConfigURL", githubConfigURL, "namespace", namespace) + + client, err := NewClient(ctx, githubConfigURL, &creds, m.userAgent, m.logger) + if err != nil { + return nil, err + } + + m.clients[key] = &actionsClientWrapper{ + mu: sync.Mutex{}, + client: client, + } + + m.logger.Info("successfully created new client", "githubConfigURL", githubConfigURL, "namespace", namespace) + + return client, nil +} + +type KubernetesSecretData map[string][]byte + +func (m *multiClient) GetClientFromSecret(ctx context.Context, 
githubConfigURL, namespace string, secretData KubernetesSecretData) (ActionsService, error) { + if len(secretData) == 0 { + return nil, fmt.Errorf("must provide secret data with either PAT or GitHub App Auth") + } + + token := string(secretData["github_token"]) + hasToken := len(token) > 0 + + appID := string(secretData["github_app_id"]) + appInstallationID := string(secretData["github_app_installation_id"]) + appPrivateKey := string(secretData["github_app_private_key"]) + hasGitHubAppAuth := len(appID) > 0 && len(appInstallationID) > 0 && len(appPrivateKey) > 0 + + if hasToken && hasGitHubAppAuth { + return nil, fmt.Errorf("must provide secret with only PAT or GitHub App Auth to avoid ambiguity in client behavior") + } + + if !hasToken && !hasGitHubAppAuth { + return nil, fmt.Errorf("neither PAT nor GitHub App Auth credentials provided in secret") + } + + auth := ActionsAuth{} + + if hasToken { + auth.Token = token + return m.GetClientFor(ctx, githubConfigURL, auth, namespace) + } + + parsedAppID, err := strconv.ParseInt(appID, 10, 64) + if err != nil { + return nil, err + } + + parsedAppInstallationID, err := strconv.ParseInt(appInstallationID, 10, 64) + if err != nil { + return nil, err + } + + auth.AppCreds = &GitHubAppAuth{AppID: parsedAppID, AppInstallationID: parsedAppInstallationID, AppPrivateKey: appPrivateKey} + return m.GetClientFor(ctx, githubConfigURL, auth, namespace) +} diff --git a/github/actions/multi_client_test.go b/github/actions/multi_client_test.go new file mode 100644 index 0000000000..11aeb7fd21 --- /dev/null +++ b/github/actions/multi_client_test.go @@ -0,0 +1,163 @@ +package actions + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + "time" + + "github.com/actions/actions-runner-controller/logging" +) + +func TestAddClient(t *testing.T) { + logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: 
creating logger: %v\n", err)
+		os.Exit(1)
+	}
+	multiClient := NewMultiClient("test-user-agent", logger).(*multiClient)
+
+	ctx := context.Background()
+
+	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if strings.HasSuffix(r.URL.Path, "actions/runners/registration-token") {
+			w.WriteHeader(http.StatusCreated)
+			w.Header().Set("Content-Type", "application/json")
+
+			token := "abc-123"
+			rt := &registrationToken{Token: &token}
+
+			if err := json.NewEncoder(w).Encode(rt); err != nil {
+				http.Error(w, err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+		if strings.HasSuffix(r.URL.Path, "actions/runner-registration") {
+			w.Header().Set("Content-Type", "application/json")
+
+			url := "actions.github.com/abc"
+			jwt := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE"
+			adminConnInfo := &ActionsServiceAdminConnection{ActionsServiceUrl: &url, AdminToken: &jwt}
+
+			if err := json.NewEncoder(w).Encode(adminConnInfo); err != nil {
+				http.Error(w, err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+		if strings.HasSuffix(r.URL.Path, "/access_tokens") {
+			w.Header().Set("Content-Type", "application/vnd.github+json")
+
+			t, _ := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z07:00")
+			accessToken := &accessToken{
+				Token:     "abc-123",
+				ExpiresAt: t,
+			}
+
+			if err := json.NewEncoder(w).Encode(accessToken); err != nil {
+				http.Error(w, err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+	}))
+	defer srv.Close()
+
+	want := 1
+	if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{Token: "PAT"}, "namespace"); err != nil {
+		t.Fatal(err)
+	}
+
+	want++ // New repo
+	if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/actions", srv.URL), ActionsAuth{Token: "PAT"}, "namespace"); err != nil {
+		t.Fatal(err)
+	}
+
+	// Repeat
+	if _, err := 
multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{Token: "PAT"}, "namespace"); err != nil { + t.Fatal(err) + } + + want++ // New namespace + if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{Token: "PAT"}, "other"); err != nil { + t.Fatal(err) + } + + want++ // New pat + if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{Token: "other"}, "other"); err != nil { + t.Fatal(err) + } + + want++ // New org + if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github", srv.URL), ActionsAuth{Token: "PAT"}, "other"); err != nil { + t.Fatal(err) + } + + // No org, repo, enterprise + if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v", srv.URL), ActionsAuth{Token: "PAT"}, "other"); err == nil { + t.Fatal(err) + } + + want++ // Test keying on GitHub App + appAuth := &GitHubAppAuth{ + AppID: 1, + AppPrivateKey: `-----BEGIN RSA PRIVATE KEY----- +MIICWgIBAAKBgHXfRT9cv9UY9fAAD4+1RshpfSSZe277urfEmPfX3/Og9zJYRk// +CZrJVD1CaBZDiIyQsNEzjta7r4UsqWdFOggiNN2E7ZTFQjMSaFkVgrzHqWuiaCBf +/BjbKPn4SMDmTzHvIe7Nel76hBdCaVgu6mYCW5jmuSH5qz/yR1U1J/WJAgMBAAEC +gYARWGWsSU3BYgbu5lNj5l0gKMXNmPhdAJYdbMTF0/KUu18k/XB7XSBgsre+vALt +I8r4RGKApoGif8P4aPYUyE8dqA1bh0X3Fj1TCz28qoUL5//dA+pigCRS20H7HM3C +ojoqF7+F+4F2sXmzFNd1NgY5RxFPYosTT7OnUiFuu2IisQJBALnMLe09LBnjuHXR +xxR65DDNxWPQLBjW3dL+ubLcwr7922l6ZIQsVjdeE0ItEUVRjjJ9/B/Jq9VJ/Lw4 +g9LCkkMCQQCiaM2f7nYmGivPo9hlAbq5lcGJ5CCYFfeeYzTxMqum7Mbqe4kk5lgb +X6gWd0Izg2nGdAEe/97DClO6VpKcPbpDAkBTR/JOJN1fvXMxXJaf13XxakrQMr+R +Yr6LlSInykyAz8lJvlLP7A+5QbHgN9NF/wh+GXqpxPwA3ukqdSqhjhWBAkBn6mDv +HPgR5xrzL6XM8y9TgaOlJAdK6HtYp6d/UOmN0+Butf6JUq07TphRT5tXNJVgemch +O5x/9UKfbrc+KyzbAkAo97TfFC+mZhU1N5fFelaRu4ikPxlp642KRUSkOh8GEkNf +jQ97eJWiWtDcsMUhcZgoB5ydHcFlrBIn6oBcpge5 +-----END RSA PRIVATE KEY-----`, + } + if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{AppCreds: appAuth}, "other"); err != nil { + 
t.Fatal(err) + } + + // Repeat last to verify GitHub App keys are mapped together + if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{AppCreds: appAuth}, "other"); err != nil { + t.Fatal(err) + } + + if len(multiClient.clients) != want { + t.Fatalf("GetClientFor: unexpected number of clients: got=%v want=%v", len(multiClient.clients), want) + } +} + +func TestCreateJWT(t *testing.T) { + key := `-----BEGIN RSA PRIVATE KEY----- +MIICWgIBAAKBgHXfRT9cv9UY9fAAD4+1RshpfSSZe277urfEmPfX3/Og9zJYRk// +CZrJVD1CaBZDiIyQsNEzjta7r4UsqWdFOggiNN2E7ZTFQjMSaFkVgrzHqWuiaCBf +/BjbKPn4SMDmTzHvIe7Nel76hBdCaVgu6mYCW5jmuSH5qz/yR1U1J/WJAgMBAAEC +gYARWGWsSU3BYgbu5lNj5l0gKMXNmPhdAJYdbMTF0/KUu18k/XB7XSBgsre+vALt +I8r4RGKApoGif8P4aPYUyE8dqA1bh0X3Fj1TCz28qoUL5//dA+pigCRS20H7HM3C +ojoqF7+F+4F2sXmzFNd1NgY5RxFPYosTT7OnUiFuu2IisQJBALnMLe09LBnjuHXR +xxR65DDNxWPQLBjW3dL+ubLcwr7922l6ZIQsVjdeE0ItEUVRjjJ9/B/Jq9VJ/Lw4 +g9LCkkMCQQCiaM2f7nYmGivPo9hlAbq5lcGJ5CCYFfeeYzTxMqum7Mbqe4kk5lgb +X6gWd0Izg2nGdAEe/97DClO6VpKcPbpDAkBTR/JOJN1fvXMxXJaf13XxakrQMr+R +Yr6LlSInykyAz8lJvlLP7A+5QbHgN9NF/wh+GXqpxPwA3ukqdSqhjhWBAkBn6mDv +HPgR5xrzL6XM8y9TgaOlJAdK6HtYp6d/UOmN0+Butf6JUq07TphRT5tXNJVgemch +O5x/9UKfbrc+KyzbAkAo97TfFC+mZhU1N5fFelaRu4ikPxlp642KRUSkOh8GEkNf +jQ97eJWiWtDcsMUhcZgoB5ydHcFlrBIn6oBcpge5 +-----END RSA PRIVATE KEY-----` + + auth := &GitHubAppAuth{ + AppID: 123, + AppPrivateKey: key, + } + jwt, err := createJWTForGitHubApp(auth) + if err != nil { + t.Fatal(err) + } + fmt.Println(jwt) +} diff --git a/github/actions/sessionservice.go b/github/actions/sessionservice.go new file mode 100644 index 0000000000..6ae20fa0b1 --- /dev/null +++ b/github/actions/sessionservice.go @@ -0,0 +1,14 @@ +package actions + +import ( + "context" + "io" +) + +//go:generate mockery --inpackage --name=SessionService +type SessionService interface { + GetMessage(ctx context.Context, lastMessageId int64) (*RunnerScaleSetMessage, error) + DeleteMessage(ctx context.Context, messageId int64) 
error + AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) + io.Closer +} diff --git a/github/actions/types.go b/github/actions/types.go new file mode 100644 index 0000000000..1c0f825ed1 --- /dev/null +++ b/github/actions/types.go @@ -0,0 +1,153 @@ +package actions + +import ( + "time" + + "github.com/google/uuid" +) + +type AcquirableJobList struct { + Count int `json:"count"` + Jobs []AcquirableJob `json:"value"` +} + +type AcquirableJob struct { + AcquireJobUrl string `json:"acquireJobUrl"` + MessageType string `json:"messageType"` + RunnerRequestId int64 `json:"runnerRequestId"` + RepositoryName string `json:"repositoryName"` + OwnerName string `json:"ownerName"` + JobWorkflowRef string `json:"jobWorkflowRef"` + EventName string `json:"eventName"` + RequestLabels []string `json:"requestLabels"` +} + +type Int64List struct { + Count int `json:"count"` + Value []int64 `json:"value"` +} + +type JobAvailable struct { + AcquireJobUrl string `json:"acquireJobUrl"` + JobMessageBase +} + +type JobAssigned struct { + JobMessageBase +} + +type JobStarted struct { + RunnerId int `json:"runnerId"` + RunnerName string `json:"runnerName"` + JobMessageBase +} + +type JobCompleted struct { + Result string `json:"result"` + RunnerId int `json:"runnerId"` + RunnerName string `json:"runnerName"` + JobMessageBase +} + +type JobMessageType struct { + MessageType string `json:"messageType"` +} + +type JobMessageBase struct { + JobMessageType + RunnerRequestId int64 `json:"runnerRequestId"` + RepositoryName string `json:"repositoryName"` + OwnerName string `json:"ownerName"` + JobWorkflowRef string `json:"jobWorkflowRef"` + JobDisplayName string `json:"jobDisplayName"` + WorkflowRunId int64 `json:"workflowRunId"` + EventName string `json:"eventName"` + RequestLabels []string `json:"requestLabels"` +} + +type Label struct { + Type string `json:"type"` + Name string `json:"name"` +} + +type RunnerGroup struct { + ID int64 `json:"id"` + Name string `json:"name"` + 
Size int64 `json:"size"` + IsDefault bool `json:"isDefaultGroup"` +} + +type RunnerGroupList struct { + Count int `json:"count"` + RunnerGroups []RunnerGroup `json:"value"` +} + +type RunnerScaleSet struct { + Id int `json:"id,omitempty"` + Name string `json:"name,omitempty"` + RunnerGroupId int `json:"runnerGroupId,omitempty"` + RunnerGroupName string `json:"runnerGroupName,omitempty"` + Labels []Label `json:"labels,omitempty"` + RunnerSetting RunnerSetting `json:"RunnerSetting,omitempty"` + CreatedOn time.Time `json:"createdOn,omitempty"` + RunnerJitConfigUrl string `json:"runnerJitConfigUrl,omitempty"` + Statistics *RunnerScaleSetStatistic `json:"statistics,omitempty"` +} + +type RunnerScaleSetJitRunnerSetting struct { + Name string `json:"name"` + WorkFolder string `json:"workFolder"` +} + +type RunnerScaleSetMessage struct { + MessageId int64 `json:"messageId"` + MessageType string `json:"messageType"` + Body string `json:"body"` + Statistics *RunnerScaleSetStatistic `json:"statistics"` +} + +type runnerScaleSetsResponse struct { + Count int `json:"count"` + RunnerScaleSets []RunnerScaleSet `json:"value"` +} + +type RunnerScaleSetSession struct { + SessionId *uuid.UUID `json:"sessionId,omitempty"` + OwnerName string `json:"ownerName,omitempty"` + RunnerScaleSet *RunnerScaleSet `json:"runnerScaleSet,omitempty"` + MessageQueueUrl string `json:"messageQueueUrl,omitempty"` + MessageQueueAccessToken string `json:"messageQueueAccessToken,omitempty"` + Statistics *RunnerScaleSetStatistic `json:"statistics,omitempty"` +} + +type RunnerScaleSetStatistic struct { + TotalAvailableJobs int `json:"totalAvailableJobs"` + TotalAcquiredJobs int `json:"totalAcquiredJobs"` + TotalAssignedJobs int `json:"totalAssignedJobs"` + TotalRunningJobs int `json:"totalRunningJobs"` + TotalRegisteredRunners int `json:"totalRegisteredRunners"` + TotalBusyRunners int `json:"totalBusyRunners"` + TotalIdleRunners int `json:"totalIdleRunners"` +} + +type RunnerSetting struct { + Ephemeral bool 
`json:"ephemeral,omitempty"` + IsElastic bool `json:"isElastic,omitempty"` + DisableUpdate bool `json:"disableUpdate,omitempty"` +} + +type RunnerReferenceList struct { + Count int `json:"count"` + RunnerReferences []RunnerReference `json:"value"` +} + +type RunnerReference struct { + Id int `json:"id"` + Name string `json:"name"` + RunnerScaleSetId int `json:"runnerScaleSetId"` +} + +type RunnerScaleSetJitRunnerConfig struct { + Runner *RunnerReference `json:"runner"` + EncodedJITConfig string `json:"encodedJITConfig"` +} diff --git a/go.mod b/go.mod index 0cd443ad7e..940e660bf9 100644 --- a/go.mod +++ b/go.mod @@ -5,72 +5,93 @@ go 1.19 require ( github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 github.com/davecgh/go-spew v1.1.1 + github.com/evanphx/json-patch v4.12.0+incompatible github.com/go-logr/logr v1.2.3 + github.com/golang-jwt/jwt/v4 v4.4.1 github.com/google/go-cmp v0.5.9 github.com/google/go-github/v47 v47.1.0 + github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 + github.com/gruntwork-io/terratest v0.40.24 + github.com/hashicorp/go-retryablehttp v0.7.1 github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.24.1 - github.com/prometheus/client_golang v1.14.0 - github.com/stretchr/testify v1.8.1 + github.com/onsi/gomega v1.20.2 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.13.0 + github.com/stretchr/testify v1.8.0 github.com/teambition/rrule-go v1.8.0 - go.uber.org/zap v1.24.0 - golang.org/x/oauth2 v0.3.0 + go.uber.org/multierr v1.7.0 + go.uber.org/zap v1.23.0 + golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 gomodules.xyz/jsonpatch/v2 v2.2.0 - k8s.io/api v0.25.5 - k8s.io/apimachinery v0.25.5 - k8s.io/client-go v0.25.5 - sigs.k8s.io/controller-runtime v0.13.1 + k8s.io/api v0.25.2 + k8s.io/apimachinery v0.25.2 + k8s.io/client-go v0.25.2 + sigs.k8s.io/controller-runtime v0.13.0 sigs.k8s.io/yaml v1.3.0 ) 
require ( - cloud.google.com/go v0.97.0 // indirect + cloud.google.com/go/compute v1.12.1 // indirect + cloud.google.com/go/compute/metadata v0.2.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/aws/aws-sdk-go v1.40.56 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/emicklei/go-restful/v3 v3.8.0 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect github.com/go-logr/zapr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.19.5 // indirect github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-sql-driver/mysql v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.4.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-github/v45 v45.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.1.0 // indirect - github.com/google/uuid v1.1.2 // indirect + github.com/gruntwork-io/go-commons v0.8.0 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.0 // indirect github.com/imdario/mergo v0.3.12 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect 
github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.6 // indirect + github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nxadm/tail v1.4.8 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect + github.com/pquerna/otp v1.2.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.4.0 // indirect + github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect - golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/term v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect + golang.org/x/crypto v0.0.0-20220824171710-5757bc0c5503 // indirect + golang.org/x/net v0.0.0-20221014081412-f15817d10f9b // indirect + golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.4.0 // indirect golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/go.sum b/go.sum index 
0891d26087..bbf85b1816 100644 --- a/go.sum +++ b/go.sum @@ -13,25 +13,16 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod 
h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -46,7 +37,6 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= @@ -58,17 +48,21 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/aws/aws-sdk-go v1.40.56 h1:FM2yjR0UUYFzDTMx+mH9Vyw1k1EUUxsAFzk+BjkzANA= +github.com/aws/aws-sdk-go v1.40.56/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 h1:5+NghM1Zred9Z078QEZtm28G/kfDfZN/92gkDlLwGVA= github.com/bradleyfalzon/ghinstallation/v2 v2.1.0/go.mod h1:Xg3xPRN5Mcq6GDqeUVhFbjEWMb4JHCyWEeeBGEYQoTU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -77,34 +71,36 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test 
v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU= +github.com/go-errors/errors 
v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -131,6 +127,8 @@ github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -151,8 +149,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -168,10 +164,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= @@ -184,10 +178,8 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp 
v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -202,8 +194,6 @@ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -211,30 +201,40 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro= +github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78= +github.com/gruntwork-io/terratest v0.40.24 h1:vxVi714rX+joBLrxBVnbMzSYQ2srIfXzjqvImHl6Rtk= +github.com/gruntwork-io/terratest v0.40.24/go.mod h1:JGeIGgLbxbG9/Oqm06z6YXVr76CfomdmLkV564qov+8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 
h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= 
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -264,9 +264,19 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= +github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg= +github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -287,30 +297,31 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.5.0 h1:TRtrvv2vdQqzkwrQ1ke6vtXf7IK34RBUJafIy1wMwls= +github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= -github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= +github.com/onsi/gomega v1.20.2 h1:8uQq0zMgLEfa0vRrrBgaJF2gyW9Da9BmfGV+OyUzfkY= +github.com/onsi/gomega v1.20.2/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok= +github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= 
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= @@ -324,20 +335,22 @@ github.com/prometheus/procfs v0.6.0/go.mod 
h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -345,33 +358,31 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/teambition/rrule-go v1.8.0 h1:a/IX5s56hGkFF+nRlJUooZU/45OTeeldBGL29nDKIHw= github.com/teambition/rrule-go v1.8.0/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io 
v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -379,8 +390,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220824171710-5757bc0c5503 h1:vJ2V3lFLg+bBhgroYuRfyN583UzVveQmIXjc8T/y3to= +golang.org/x/crypto v0.0.0-20220824171710-5757bc0c5503/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -403,8 +414,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -413,9 
+422,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -448,37 +454,22 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= 
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b h1:tvrvnPFcdzp294diPnrdZZZ8XUt2Tyj7svb7X52iDuU= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= -golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -489,12 +480,12 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -506,6 +497,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -525,50 +517,30 @@ golang.org/x/sys 
v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0 
h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -616,19 +588,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools 
v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -651,18 +612,6 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod 
h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -694,39 +643,13 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= 
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -739,20 +662,6 @@ google.golang.org/grpc v1.28.0/go.mod 
h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -765,7 +674,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj 
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -782,7 +690,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -801,14 +708,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.5 h1:mqyHf7aoaYMpdvO87mqpol+Qnsmo+y09S0PMIXwiZKo= -k8s.io/api v0.25.5/go.mod h1:RzplZX0Z8rV/WhSTfEvnyd91bBhBQTRWo85qBQwRmb8= +k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8= +k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0= 
k8s.io/apiextensions-apiserver v0.25.0 h1:CJ9zlyXAbq0FIW8CD7HHyozCMBpDSiH7EdrSTCZcZFY= k8s.io/apiextensions-apiserver v0.25.0/go.mod h1:3pAjZiN4zw7R8aZC5gR0y3/vCkGlAjCazcg1me8iB/E= -k8s.io/apimachinery v0.25.5 h1:SQomYHvv+aO43qdu3QKRf9YuI0oI8w3RrOQ1qPbAUGY= -k8s.io/apimachinery v0.25.5/go.mod h1:1S2i1QHkmxc8+EZCIxe/fX5hpldVXk4gvnJInMEb8D4= -k8s.io/client-go v0.25.5 h1:7QWVK0Ph4bLn0UwotPTc2FTgm8shreQXyvXnnHDd8rE= -k8s.io/client-go v0.25.5/go.mod h1:bOeoaUUdpyz3WDFGo+Xm3nOQFh2KuYXRDwrvbAPtFQA= +k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs= +k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= +k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= +k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= k8s.io/component-base v0.25.0 h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y= k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yAFxk= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -821,8 +728,8 @@ k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg= -sigs.k8s.io/controller-runtime v0.13.1/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= +sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRMc2WIQ= +sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= 
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/hash/fnv.go b/hash/fnv.go index a8382544a7..20507d4a3f 100644 --- a/hash/fnv.go +++ b/hash/fnv.go @@ -3,6 +3,7 @@ package hash import ( "fmt" "hash/fnv" + "k8s.io/apimachinery/pkg/util/rand" ) @@ -15,3 +16,9 @@ func FNVHashStringObjects(objs ...interface{}) string { return rand.SafeEncodeString(fmt.Sprint(hash.Sum32())) } + +func FNVHashString(name string) string { + hash := fnv.New32a() + hash.Write([]byte(name)) + return rand.SafeEncodeString(fmt.Sprint(hash.Sum32())) +} diff --git a/hash/hash.go b/hash/hash.go index a6c3e1c62f..06537314fa 100644 --- a/hash/hash.go +++ b/hash/hash.go @@ -5,9 +5,12 @@ package hash import ( + "fmt" "hash" + "hash/fnv" "github.com/davecgh/go-spew/spew" + "k8s.io/apimachinery/pkg/util/rand" ) // DeepHashObject writes specified object to hash using the spew library @@ -23,3 +26,26 @@ func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { } printer.Fprintf(hasher, "%#v", objectToWrite) } + +// ComputeHash returns a hash value calculated from template and +// a collisionCount to avoid hash collision. The hash will be safe encoded to +// avoid bad words. It expects **template. In other words, you should pass an address +// of a DeepCopy result. +// +// Proudly modified and adopted from k8s.io/kubernetes/pkg/util/hash.DeepHashObject and +// k8s.io/kubernetes/pkg/controller.ComputeHash. 
+func ComputeTemplateHash(template interface{}) string { + hasher := fnv.New32a() + + hasher.Reset() + + printer := spew.ConfigState{ + Indent: " ", + SortKeys: true, + DisableMethods: true, + SpewKeys: true, + } + printer.Fprintf(hasher, "%#v", template) + + return rand.SafeEncodeString(fmt.Sprint(hasher.Sum32())) +} diff --git a/logging/logger.go b/logging/logger.go index f7b2a5d732..f2a684672a 100644 --- a/logging/logger.go +++ b/logging/logger.go @@ -18,6 +18,8 @@ const ( LogLevelInfo = "info" LogLevelWarn = "warn" LogLevelError = "error" + LogFormatText = "text" + LogFormatJSON = "json" ) var ( diff --git a/main.go b/main.go index 3260768f82..754c1cbbe3 100644 --- a/main.go +++ b/main.go @@ -17,20 +17,25 @@ limitations under the License. package main import ( + "context" "flag" "fmt" "os" "strings" "time" - actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1" + githubv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + summerwindv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1" "github.com/actions/actions-runner-controller/build" + actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com" actionssummerwindnet "github.com/actions/actions-runner-controller/controllers/actions.summerwind.net" "github.com/actions/actions-runner-controller/github" + "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/logging" "github.com/kelseyhightower/envconfig" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - + "k8s.io/apimachinery/pkg/types" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" @@ -48,8 +53,8 @@ var ( func init() { _ = clientgoscheme.AddToScheme(scheme) - - _ = actionsv1alpha1.AddToScheme(scheme) + _ = githubv1alpha1.AddToScheme(scheme) + _ = 
summerwindv1alpha1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } @@ -68,12 +73,14 @@ func main() { err error ghClient *github.Client - metricsAddr string - enableLeaderElection bool - runnerStatusUpdateHook bool - leaderElectionId string - port int - syncPeriod time.Duration + metricsAddr string + autoScalingRunnerSetOnly bool + enableLeaderElection bool + disableAdmissionWebhook bool + runnerStatusUpdateHook bool + leaderElectionId string + port int + syncPeriod time.Duration defaultScaleDownDelay time.Duration @@ -86,6 +93,8 @@ func main() { logLevel string logFormat string + autoScalerImagePullSecrets stringSlice + commonRunnerLabels commaSeparatedStringSlice ) var c github.Config @@ -121,7 +130,8 @@ func main() { flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.") flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`) flag.StringVar(&logFormat, "log-format", "text", `The log format. Valid options are "text" and "json". 
Defaults to "text"`) - + flag.BoolVar(&autoScalingRunnerSetOnly, "auto-scaling-runner-set-only", false, "Make controller only reconcile AutoRunnerScaleSet object.") + flag.Var(&autoScalerImagePullSecrets, "auto-scaler-image-pull-secrets", "The default image-pull secret name for auto-scaler listener container.") flag.Parse() log, err := logging.NewLogger(logLevel, logFormat) @@ -131,14 +141,21 @@ func main() { } c.Log = &log - ghClient, err = c.NewClient() - if err != nil { - fmt.Fprintln(os.Stderr, "Error: Client creation failed.", err) - os.Exit(1) + if !autoScalingRunnerSetOnly { + ghClient, err = c.NewClient() + if err != nil { + log.Error(err, "unable to create client") + os.Exit(1) + } } ctrl.SetLogger(log) + if autoScalingRunnerSetOnly { + // We don't support metrics for AutoRunnerScaleSet for now + metricsAddr = "0" + } + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, MetricsBindAddress: metricsAddr, @@ -158,148 +175,224 @@ func main() { ghClient, ) - runnerReconciler := &actionssummerwindnet.RunnerReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("runner"), - Scheme: mgr.GetScheme(), - GitHubClient: multiClient, - DockerImage: dockerImage, - DockerRegistryMirror: dockerRegistryMirror, - UseRunnerStatusUpdateHook: runnerStatusUpdateHook, - // Defaults for self-hosted runner containers - RunnerImage: runnerImage, - RunnerImagePullSecrets: runnerImagePullSecrets, - } + actionsMultiClient := actions.NewMultiClient( + "actions-runner-controller/"+build.Version, + log.WithName("actions-clients"), + ) - if err = runnerReconciler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "Runner") - os.Exit(1) - } + if !autoScalingRunnerSetOnly { + runnerReconciler := &actionssummerwindnet.RunnerReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("runner"), + Scheme: mgr.GetScheme(), + GitHubClient: multiClient, + DockerImage: dockerImage, + DockerRegistryMirror: 
dockerRegistryMirror, + UseRunnerStatusUpdateHook: runnerStatusUpdateHook, + // Defaults for self-hosted runner containers + RunnerImage: runnerImage, + RunnerImagePullSecrets: runnerImagePullSecrets, + } - runnerReplicaSetReconciler := &actionssummerwindnet.RunnerReplicaSetReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("runnerreplicaset"), - Scheme: mgr.GetScheme(), - } + if err = runnerReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "Runner") + os.Exit(1) + } - if err = runnerReplicaSetReconciler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "RunnerReplicaSet") - os.Exit(1) - } + runnerReplicaSetReconciler := &actionssummerwindnet.RunnerReplicaSetReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("runnerreplicaset"), + Scheme: mgr.GetScheme(), + } - runnerDeploymentReconciler := &actionssummerwindnet.RunnerDeploymentReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("runnerdeployment"), - Scheme: mgr.GetScheme(), - CommonRunnerLabels: commonRunnerLabels, - } + if err = runnerReplicaSetReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "RunnerReplicaSet") + os.Exit(1) + } - if err = runnerDeploymentReconciler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "RunnerDeployment") - os.Exit(1) - } + runnerDeploymentReconciler := &actionssummerwindnet.RunnerDeploymentReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("runnerdeployment"), + Scheme: mgr.GetScheme(), + CommonRunnerLabels: commonRunnerLabels, + } - runnerSetReconciler := &actionssummerwindnet.RunnerSetReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("runnerset"), - Scheme: mgr.GetScheme(), - CommonRunnerLabels: commonRunnerLabels, - DockerImage: dockerImage, - DockerRegistryMirror: dockerRegistryMirror, - GitHubClient: multiClient, - // Defaults 
for self-hosted runner containers - RunnerImage: runnerImage, - RunnerImagePullSecrets: runnerImagePullSecrets, - UseRunnerStatusUpdateHook: runnerStatusUpdateHook, - } + if err = runnerDeploymentReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "RunnerDeployment") + os.Exit(1) + } - if err = runnerSetReconciler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "RunnerSet") - os.Exit(1) - } + runnerSetReconciler := &actionssummerwindnet.RunnerSetReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("runnerset"), + Scheme: mgr.GetScheme(), + CommonRunnerLabels: commonRunnerLabels, + DockerImage: dockerImage, + DockerRegistryMirror: dockerRegistryMirror, + GitHubClient: multiClient, + // Defaults for self-hosted runner containers + RunnerImage: runnerImage, + RunnerImagePullSecrets: runnerImagePullSecrets, + UseRunnerStatusUpdateHook: runnerStatusUpdateHook, + } - log.Info( - "Initializing actions-runner-controller", - "version", build.Version, - "default-scale-down-delay", defaultScaleDownDelay, - "sync-period", syncPeriod, - "default-runner-image", runnerImage, - "default-docker-image", dockerImage, - "common-runnner-labels", commonRunnerLabels, - "leader-election-enabled", enableLeaderElection, - "leader-election-id", leaderElectionId, - "watch-namespace", namespace, - ) + if err = runnerSetReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "RunnerSet") + os.Exit(1) + } - horizontalRunnerAutoscaler := &actionssummerwindnet.HorizontalRunnerAutoscalerReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("horizontalrunnerautoscaler"), - Scheme: mgr.GetScheme(), - GitHubClient: multiClient, - DefaultScaleDownDelay: defaultScaleDownDelay, - } + log.Info( + "Initializing actions-runner-controller", + "version", build.Version, + "default-scale-down-delay", defaultScaleDownDelay, + "sync-period", 
syncPeriod, + "default-runner-image", runnerImage, + "default-docker-image", dockerImage, + "common-runnner-labels", commonRunnerLabels, + "leader-election-enabled", enableLeaderElection, + "leader-election-id", leaderElectionId, + "watch-namespace", namespace, + ) + + horizontalRunnerAutoscaler := &actionssummerwindnet.HorizontalRunnerAutoscalerReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("horizontalrunnerautoscaler"), + Scheme: mgr.GetScheme(), + GitHubClient: multiClient, + DefaultScaleDownDelay: defaultScaleDownDelay, + } - runnerPodReconciler := &actionssummerwindnet.RunnerPodReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("runnerpod"), - Scheme: mgr.GetScheme(), - GitHubClient: multiClient, - } + runnerPodReconciler := &actionssummerwindnet.RunnerPodReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("runnerpod"), + Scheme: mgr.GetScheme(), + GitHubClient: multiClient, + } - runnerPersistentVolumeReconciler := &actionssummerwindnet.RunnerPersistentVolumeReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("runnerpersistentvolume"), - Scheme: mgr.GetScheme(), - } + runnerPersistentVolumeReconciler := &actionssummerwindnet.RunnerPersistentVolumeReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("runnerpersistentvolume"), + Scheme: mgr.GetScheme(), + } - runnerPersistentVolumeClaimReconciler := &actionssummerwindnet.RunnerPersistentVolumeClaimReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("runnerpersistentvolumeclaim"), - Scheme: mgr.GetScheme(), + runnerPersistentVolumeClaimReconciler := &actionssummerwindnet.RunnerPersistentVolumeClaimReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("runnerpersistentvolumeclaim"), + Scheme: mgr.GetScheme(), + } + + if err = runnerPodReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "RunnerPod") + os.Exit(1) + } + + if err = horizontalRunnerAutoscaler.SetupWithManager(mgr); err != nil { + log.Error(err, 
"unable to create controller", "controller", "HorizontalRunnerAutoscaler") + os.Exit(1) + } + + if err = runnerPersistentVolumeReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "RunnerPersistentVolume") + os.Exit(1) + } + + if err = runnerPersistentVolumeClaimReconciler.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "RunnerPersistentVolumeClaim") + os.Exit(1) + } + + if !disableAdmissionWebhook { + if err = (&summerwindv1alpha1.Runner{}).SetupWebhookWithManager(mgr); err != nil { + log.Error(err, "unable to create webhook", "webhook", "Runner") + os.Exit(1) + } + if err = (&summerwindv1alpha1.RunnerDeployment{}).SetupWebhookWithManager(mgr); err != nil { + log.Error(err, "unable to create webhook", "webhook", "RunnerDeployment") + os.Exit(1) + } + if err = (&summerwindv1alpha1.RunnerReplicaSet{}).SetupWebhookWithManager(mgr); err != nil { + log.Error(err, "unable to create webhook", "webhook", "RunnerReplicaSet") + os.Exit(1) + } + } } - if err = runnerPodReconciler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "RunnerPod") + mgrPodName := os.Getenv("CONTROLLER_MANAGER_POD_NAME") + mgrPodNamespace := os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE") + var mgrPod corev1.Pod + err = mgr.GetAPIReader().Get(context.Background(), types.NamespacedName{Namespace: mgrPodNamespace, Name: mgrPodName}, &mgrPod) + if err != nil { + log.Error(err, fmt.Sprintf("unable to obtain manager pod: %s (%s)", mgrPodName, mgrPodNamespace)) os.Exit(1) } - if err = horizontalRunnerAutoscaler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "HorizontalRunnerAutoscaler") - os.Exit(1) + var mgrContainer *corev1.Container + for _, container := range mgrPod.Spec.Containers { + if container.Name == "manager" { + mgrContainer = &container + break + } } - if err = 
runnerPersistentVolumeReconciler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "RunnerPersistentVolume") + if mgrContainer != nil { + log.Info("Detected manager container", "image", mgrContainer.Image) + } else { + log.Error(err, "unable to obtain manager container image") os.Exit(1) } - if err = runnerPersistentVolumeClaimReconciler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "RunnerPersistentVolumeClaim") + if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("AutoscalingRunnerSet"), + Scheme: mgr.GetScheme(), + ControllerNamespace: mgrPodNamespace, + DefaultRunnerScaleSetListenerImage: mgrContainer.Image, + ActionsClient: actionsMultiClient, + DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets, + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet") os.Exit(1) } - if err = (&actionsv1alpha1.Runner{}).SetupWebhookWithManager(mgr); err != nil { - log.Error(err, "unable to create webhook", "webhook", "Runner") + if err = (&actionsgithubcom.EphemeralRunnerReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("EphemeralRunner"), + Scheme: mgr.GetScheme(), + ActionsClient: actionsMultiClient, + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "EphemeralRunner") os.Exit(1) } - if err = (&actionsv1alpha1.RunnerDeployment{}).SetupWebhookWithManager(mgr); err != nil { - log.Error(err, "unable to create webhook", "webhook", "RunnerDeployment") + + if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("EphemeralRunnerSet"), + Scheme: mgr.GetScheme(), + ActionsClient: actionsMultiClient, + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet") 
os.Exit(1) } - if err = (&actionsv1alpha1.RunnerReplicaSet{}).SetupWebhookWithManager(mgr); err != nil { - log.Error(err, "unable to create webhook", "webhook", "RunnerReplicaSet") + if err = (&actionsgithubcom.AutoscalingListenerReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("AutoscalingListener"), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "AutoscalingListener") os.Exit(1) } // +kubebuilder:scaffold:builder - injector := &actionssummerwindnet.PodRunnerTokenInjector{ - Client: mgr.GetClient(), - GitHubClient: multiClient, - Log: ctrl.Log.WithName("webhook").WithName("PodRunnerTokenInjector"), - } - if err = injector.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create webhook server", "webhook", "PodRunnerTokenInjector") - os.Exit(1) + if !disableAdmissionWebhook && !autoScalingRunnerSetOnly { + injector := &actionssummerwindnet.PodRunnerTokenInjector{ + Client: mgr.GetClient(), + GitHubClient: multiClient, + Log: ctrl.Log.WithName("webhook").WithName("PodRunnerTokenInjector"), + } + if err = injector.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create webhook server", "webhook", "PodRunnerTokenInjector") + os.Exit(1) + } } log.Info("starting manager") From 3b3b32549c6faba0c6e7720f65d94af08128513d Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Tue, 17 Jan 2023 18:07:52 +0100 Subject: [PATCH 020/561] Create publish-arc2.yaml (#2167) --- .github/workflows/publish-arc2.yaml | 133 ++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 .github/workflows/publish-arc2.yaml diff --git a/.github/workflows/publish-arc2.yaml b/.github/workflows/publish-arc2.yaml new file mode 100644 index 0000000000..aab90e2ea7 --- /dev/null +++ b/.github/workflows/publish-arc2.yaml @@ -0,0 +1,133 @@ +name: Publish ARC 2 + +on: + workflow_dispatch: + inputs: + ref: + description: 'The 
branch, tag or SHA to cut a release from' + required: false + type: string + default: '' + release_tag_name: + description: 'The name to tag the controller image with' + required: true + type: string + default: 'canary' + push_to_registries: + description: 'Push images to registries' + required: true + type: boolean + default: false + publish_helm: + description: 'Publish new helm chart' + required: true + type: boolean + default: false + +env: + HELM_VERSION: v3.8.0 + +permissions: + packages: write + +jobs: + build-push-image: + name: Build and push controller image + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + # If inputs.ref is empty, it'll resolve to the default branch + ref: ${{ inputs.ref }} + + - name: Resolve parameters + id: resolve_parameters + run: | + echo "INFO: Resolving short SHA for ${{ inputs.ref }}" + echo "short_sha=$(git rev-parse --short ${{ inputs.ref }})" >> $GITHUB_OUTPUT + echo "INFO: Normalizing repository name (lowercase)" + echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + with: + version: latest + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build & push controller image + uses: docker/build-push-action@v3 + with: + file: Dockerfile + platforms: linux/amd64,linux/arm64 + build-args: VERSION=${{ inputs.release_tag_name }} + push: ${{ inputs.push_to_registries }} + tags: | + ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-2:${{ inputs.release_tag_name }} + ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-2:${{ inputs.release_tag_name }}-${{ 
steps.resolve_parameters.outputs.short_sha }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Job summary + run: | + echo "The [publish-arc2](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/publish-arc2.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY + echo "- Ref: ${{ inputs.ref }}" >> $GITHUB_STEP_SUMMARY + echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY + echo "- Release tag: ${{ inputs.release_tag_name }}" >> $GITHUB_STEP_SUMMARY + echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + + publish-helm-chart: + if: ${{ inputs.publish_helm == true }} + needs: build-push-image + name: Publish Helm chart + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + # If inputs.ref is empty, it'll resolve to the default branch + ref: ${{ inputs.ref }} + + - name: Resolve parameters + id: resolve_parameters + run: | + echo "INFO: Resolving short SHA for ${{ inputs.ref }}" + echo "short_sha=$(git rev-parse --short ${{ inputs.ref }})" >> $GITHUB_OUTPUT + echo "INFO: Normalizing repository name (lowercase)" + echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT + + - name: Set up Helm + uses: azure/setup-helm@v3.3 + with: + version: ${{ env.HELM_VERSION }} + + - name: Publish new helm chart + run: | + echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin + CHART_VERSION='$(cat charts_preview/actions-runner-controller-2/Chart.yaml | grep version: | cut -d " " -f 2)' + echo "CHART_VERSION_TAG=${CHART_VERSION}-${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_ENV + helm package charts_preview/actions-runner-controller-2/ --version="${CHART_VERSION}-${{ 
steps.resolve_parameters.outputs.short_sha }}" + # Tag is inferred from SemVer of Chart and cannot be set manually. + # See https://helm.sh/docs/topics/registries/#the-push-subcommand + helm push actions-runner-controller-"${CHART_VERSION}-${{ steps.resolve_parameters.outputs.short_sha }}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-helm-chart-2 + + - name: Job summary + run: | + echo "New helm chart published successfully!" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY + echo "- Ref: ${{ inputs.ref }}" >> $GITHUB_STEP_SUMMARY + echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY + echo "- Chart version: ${{ env.CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY From c0641026b433559dd1c28f833e13ce3662d2233d Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 17 Jan 2023 12:58:37 -0500 Subject: [PATCH 021/561] Populate resolve ref when input.ref is empty. 
(#2170) --- .github/workflows/publish-arc2.yaml | 23 +++++++++++++++++------ .github/workflows/validate-chart.yaml | 2 -- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/.github/workflows/publish-arc2.yaml b/.github/workflows/publish-arc2.yaml index aab90e2ea7..78fccc4acf 100644 --- a/.github/workflows/publish-arc2.yaml +++ b/.github/workflows/publish-arc2.yaml @@ -44,8 +44,14 @@ jobs: - name: Resolve parameters id: resolve_parameters run: | - echo "INFO: Resolving short SHA for ${{ inputs.ref }}" - echo "short_sha=$(git rev-parse --short ${{ inputs.ref }})" >> $GITHUB_OUTPUT + resolvedRef="${{ inputs.ref }}" + if [ -z "$resolvedRef" ] + then + resolvedRef="${{ github.ref }}" + fi + echo "resolved_ref=$resolveRef" >> $GITHUB_OUTPUT + echo "INFO: Resolving short SHA for $resolveRef" + echo "short_sha=$(git rev-parse --short $resolveRef)" >> $GITHUB_OUTPUT echo "INFO: Normalizing repository name (lowercase)" echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT @@ -82,7 +88,7 @@ jobs: echo "The [publish-arc2](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/publish-arc2.yaml) workflow run was completed successfully!" 
>> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY - echo "- Ref: ${{ inputs.ref }}" >> $GITHUB_STEP_SUMMARY + echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY echo "- Release tag: ${{ inputs.release_tag_name }}" >> $GITHUB_STEP_SUMMARY echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY @@ -103,8 +109,13 @@ jobs: - name: Resolve parameters id: resolve_parameters run: | - echo "INFO: Resolving short SHA for ${{ inputs.ref }}" - echo "short_sha=$(git rev-parse --short ${{ inputs.ref }})" >> $GITHUB_OUTPUT + resolvedRef="${{ inputs.ref }}" + if [ -z "$resolvedRef" ] + then + resolvedRef="${{ github.ref }}" + fi + echo "INFO: Resolving short SHA for $resolvedRef" + echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT echo "INFO: Normalizing repository name (lowercase)" echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT @@ -128,6 +139,6 @@ jobs: echo "New helm chart published successfully!" 
>> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY - echo "- Ref: ${{ inputs.ref }}" >> $GITHUB_STEP_SUMMARY + echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY echo "- Chart version: ${{ env.CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/validate-chart.yaml b/.github/workflows/validate-chart.yaml index a0fc2b4d89..99fd267dfe 100644 --- a/.github/workflows/validate-chart.yaml +++ b/.github/workflows/validate-chart.yaml @@ -6,8 +6,6 @@ on: - 'charts/**' - '.github/workflows/validate-chart.yaml' - '!charts/actions-runner-controller/docs/**' - - '!charts/actions-runner-controller-2/**' - - '!charts/auto-scaling-runner-set/**' - '!**.md' workflow_dispatch: env: From 1daf3a85e92de7e8f43a013f5bb5fdbf13614870 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 17 Jan 2023 13:07:40 -0500 Subject: [PATCH 022/561] Fix typo in workflow. 
(#2172) --- .github/workflows/publish-arc2.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish-arc2.yaml b/.github/workflows/publish-arc2.yaml index 78fccc4acf..8d26a84a72 100644 --- a/.github/workflows/publish-arc2.yaml +++ b/.github/workflows/publish-arc2.yaml @@ -49,9 +49,9 @@ jobs: then resolvedRef="${{ github.ref }}" fi - echo "resolved_ref=$resolveRef" >> $GITHUB_OUTPUT - echo "INFO: Resolving short SHA for $resolveRef" - echo "short_sha=$(git rev-parse --short $resolveRef)" >> $GITHUB_OUTPUT + echo "resolved_ref=$resolvedRef" >> $GITHUB_OUTPUT + echo "INFO: Resolving short SHA for $resolvedRef" + echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT echo "INFO: Normalizing repository name (lowercase)" echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT From a0f8213f3c4448444cce41b498abd608a9906ad4 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 17 Jan 2023 14:36:04 -0500 Subject: [PATCH 023/561] Introduce new helm charts for the preview auto-scaling mode for ARC. 
(#2168) --- Makefile | 4 + .../actions-runner-controller-2/.helmignore | 23 + charts/actions-runner-controller-2/Chart.yaml | 33 + ...tions.github.com_autoscalinglisteners.yaml | 97 + ...ions.github.com_autoscalingrunnersets.yaml | 4218 ++++++++++++++++ .../actions.github.com_ephemeralrunners.yaml | 4249 +++++++++++++++++ ...ctions.github.com_ephemeralrunnersets.yaml | 4206 ++++++++++++++++ .../templates/NOTES.txt | 3 + .../templates/_helpers.tpl | 97 + .../templates/deployment.yaml | 98 + .../templates/leader_election_role.yaml | 12 + .../leader_election_role_binding.yaml | 15 + .../templates/manager_role.yaml | 250 + .../templates/manager_role_binding.yaml | 12 + .../templates/serviceaccount.yaml | 13 + .../tests/template_test.go | 508 ++ .../actions-runner-controller-2/values.yaml | 65 + charts/auto-scaling-runner-set/.helmignore | 23 + charts/auto-scaling-runner-set/Chart.yaml | 33 + .../auto-scaling-runner-set/ci/ci-values.yaml | 6 + .../templates/NOTES.txt | 3 + .../templates/_helpers.tpl | 313 ++ .../templates/autoscalingrunnerset.yaml | 91 + .../templates/githubsecret.yaml | 37 + .../templates/kube_mode_role.yaml | 24 + .../templates/kube_mode_role_binding.yaml | 15 + .../templates/kube_mode_serviceaccount.yaml | 9 + .../no_permission_serviceaccount.yaml | 9 + .../tests/template_test.go | 606 +++ charts/auto-scaling-runner-set/values.yaml | 117 + 30 files changed, 15189 insertions(+) create mode 100644 charts/actions-runner-controller-2/.helmignore create mode 100644 charts/actions-runner-controller-2/Chart.yaml create mode 100644 charts/actions-runner-controller-2/crds/actions.github.com_autoscalinglisteners.yaml create mode 100644 charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml create mode 100644 charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml create mode 100644 charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml create mode 100644 
charts/actions-runner-controller-2/templates/NOTES.txt create mode 100644 charts/actions-runner-controller-2/templates/_helpers.tpl create mode 100644 charts/actions-runner-controller-2/templates/deployment.yaml create mode 100644 charts/actions-runner-controller-2/templates/leader_election_role.yaml create mode 100644 charts/actions-runner-controller-2/templates/leader_election_role_binding.yaml create mode 100644 charts/actions-runner-controller-2/templates/manager_role.yaml create mode 100644 charts/actions-runner-controller-2/templates/manager_role_binding.yaml create mode 100644 charts/actions-runner-controller-2/templates/serviceaccount.yaml create mode 100644 charts/actions-runner-controller-2/tests/template_test.go create mode 100644 charts/actions-runner-controller-2/values.yaml create mode 100644 charts/auto-scaling-runner-set/.helmignore create mode 100644 charts/auto-scaling-runner-set/Chart.yaml create mode 100644 charts/auto-scaling-runner-set/ci/ci-values.yaml create mode 100644 charts/auto-scaling-runner-set/templates/NOTES.txt create mode 100644 charts/auto-scaling-runner-set/templates/_helpers.tpl create mode 100644 charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml create mode 100644 charts/auto-scaling-runner-set/templates/githubsecret.yaml create mode 100644 charts/auto-scaling-runner-set/templates/kube_mode_role.yaml create mode 100644 charts/auto-scaling-runner-set/templates/kube_mode_role_binding.yaml create mode 100644 charts/auto-scaling-runner-set/templates/kube_mode_serviceaccount.yaml create mode 100644 charts/auto-scaling-runner-set/templates/no_permission_serviceaccount.yaml create mode 100644 charts/auto-scaling-runner-set/tests/template_test.go create mode 100644 charts/auto-scaling-runner-set/values.yaml diff --git a/Makefile b/Makefile index e29d42f045..d997d3fa2d 100644 --- a/Makefile +++ b/Makefile @@ -116,6 +116,10 @@ manifests-gen-crds: controller-gen yq chart-crds: cp config/crd/bases/*.yaml 
charts/actions-runner-controller/crds/ + cp config/crd/bases/actions.github.com_autoscalingrunnersets.yaml charts/actions-runner-controller-2/crds/ + cp config/crd/bases/actions.github.com_autoscalinglisteners.yaml charts/actions-runner-controller-2/crds/ + cp config/crd/bases/actions.github.com_ephemeralrunnersets.yaml charts/actions-runner-controller-2/crds/ + cp config/crd/bases/actions.github.com_ephemeralrunners.yaml charts/actions-runner-controller-2/crds/ rm charts/actions-runner-controller/crds/actions.github.com_autoscalingrunnersets.yaml rm charts/actions-runner-controller/crds/actions.github.com_autoscalinglisteners.yaml rm charts/actions-runner-controller/crds/actions.github.com_ephemeralrunnersets.yaml diff --git a/charts/actions-runner-controller-2/.helmignore b/charts/actions-runner-controller-2/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/charts/actions-runner-controller-2/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/actions-runner-controller-2/Chart.yaml b/charts/actions-runner-controller-2/Chart.yaml new file mode 100644 index 0000000000..e592f5a12d --- /dev/null +++ b/charts/actions-runner-controller-2/Chart.yaml @@ -0,0 +1,33 @@ +apiVersion: v2 +name: actions-runner-controller-2 +description: A Helm chart for install actions-runner-controller CRD + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. 
They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "preview" + +home: https://github.com/actions/actions-runner-controller + +sources: + - "https://github.com/actions/actions-runner-controller" + +maintainers: + - name: actions + url: https://github.com/actions \ No newline at end of file diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_autoscalinglisteners.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_autoscalinglisteners.yaml new file mode 100644 index 0000000000..18946cb318 --- /dev/null +++ b/charts/actions-runner-controller-2/crds/actions.github.com_autoscalinglisteners.yaml @@ -0,0 +1,97 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + name: autoscalinglisteners.actions.github.com +spec: + group: actions.github.com + names: + kind: AutoscalingListener + listKind: AutoscalingListenerList + plural: autoscalinglisteners + singular: autoscalinglistener + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.githubConfigUrl + name: GitHub Configure URL + type: string + - jsonPath: .spec.autoscalingRunnerSetNamespace + name: 
AutoscalingRunnerSet Namespace + type: string + - jsonPath: .spec.autoscalingRunnerSetName + name: AutoscalingRunnerSet Name + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutoscalingListener is the Schema for the autoscalinglisteners API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutoscalingListenerSpec defines the desired state of AutoscalingListener + properties: + autoscalingRunnerSetName: + description: Required + type: string + autoscalingRunnerSetNamespace: + description: Required + type: string + ephemeralRunnerSetName: + description: Required + type: string + githubConfigSecret: + description: Required + type: string + githubConfigUrl: + description: Required + type: string + image: + description: Required + type: string + imagePullSecrets: + description: Required + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + type: array + maxRunners: + description: Required + minimum: 0 + type: integer + minRunners: + description: Required + minimum: 0 + type: integer + runnerScaleSetId: + description: Required + type: integer + type: object + status: + description: AutoscalingListenerStatus defines the observed state of AutoscalingListener + type: object + type: object + served: true + storage: true + subresources: + status: {} + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml new file mode 100644 index 0000000000..9d60edaa99 --- /dev/null +++ b/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml @@ -0,0 +1,4218 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + name: autoscalingrunnersets.actions.github.com +spec: + group: actions.github.com + names: + kind: AutoscalingRunnerSet + listKind: AutoscalingRunnerSetList + plural: autoscalingrunnersets + singular: autoscalingrunnerset + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.minRunners + name: Minimum Runners + type: number + - jsonPath: .spec.maxRunners + name: Maximum Runners + type: number + - jsonPath: .status.currentRunners + name: Current Runners + type: number + - jsonPath: .status.state + name: State + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutoscalingRunnerSet is the Schema for the autoscalingrunnersets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutoscalingRunnerSetSpec defines the desired state of AutoscalingRunnerSet + properties: + githubConfigSecret: + description: Required + type: string + githubConfigUrl: + description: Required + type: string + githubServerTLS: + properties: + certConfigMapRef: + description: Required + type: string + type: object + maxRunners: + minimum: 0 + type: integer + minRunners: + minimum: 0 + type: integer + proxy: + properties: + http: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + https: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + type: object + runnerGroup: + type: string + template: + description: Required + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: 'Specification of the desired behavior of the pod. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + activeDeadlineSeconds: + description: Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + type: boolean + containers: + description: List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. 
+ properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. 
+ items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. 
This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. 
+ type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. 
When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. 
+ properties: + nameservers: + description: A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: 'EnableServiceLinks indicates whether information about services should be injected into pod''s environment variables, matching the syntax of Docker links. Optional: Defaults to true.' + type: boolean + ephemeralContainers: + description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. 
+ items: + description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted." + properties: + args: + description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral containers. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.' 
+ properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. 
If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. 
All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + targetContainerName: + description: "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. \n The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined." + type: string + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. 
+ type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. + items: + description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: 'Use the host''s ipc namespace. Optional: Default to false.' + type: boolean + hostNetwork: + description: Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. + type: boolean + hostPID: + description: 'Use the host''s pid namespace. Optional: Default to false.' + type: boolean + hostUsers: + description: 'Use the host''s user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.' + type: boolean + hostname: + description: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. 
+ type: string + imagePullSecrets: + description: 'ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. 
RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. 
If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. 
You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + x-kubernetes-map-type: atomic + os: + description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
\n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" + properties: + name: + description: 'Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null' + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. 
If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md' + type: object + preemptionPolicy: + description: PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. + type: string + readinessGates: + description: 'If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates' + items: + description: PodReadinessGate contains the reference to a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + runtimeClassName: + description: 'RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class' + type: string + schedulerName: + description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. + type: string + securityContext: + description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' + properties: + fsGroup: + description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. 
+ items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: 'DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.' + type: string + serviceAccountName: + description: 'ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/' + type: string + setHostnameAsFQDN: + description: If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. + type: boolean + shareProcessNamespace: + description: 'Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.' + type: boolean + subdomain: + description: If specified, the fully qualified Pod hostname will be "...svc.". If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. 
The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. 
And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. 
\n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: 'List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes' + items: + description: Volume represents a named volume in a pod that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. 
If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". 
If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. \n Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. 
\n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. 
This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. 
+ type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with other supported volume types + properties: + configMap: + description: configMap information about the configMap data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional field specify whether the Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the mount point of the file to project the token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no group + type: string + readOnly: + description: readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system as configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + type: object + status: + description: AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet + properties: + currentRunners: + type: integer + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml new file mode 100644 index 0000000000..4712d7662f --- /dev/null +++ b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml @@ -0,0 +1,4249 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + name: ephemeralrunners.actions.github.com +spec: + group: actions.github.com + names: + kind: EphemeralRunner + listKind: EphemeralRunnerList + plural: ephemeralrunners + singular: ephemeralrunner + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.githubConfigUrl + name: GitHub Config URL + type: string + - jsonPath: .status.runnerId + name: RunnerId + type: number + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .status.jobRepositoryName + name: JobRepository + type: string + - jsonPath: .status.jobWorkflowRef + name: JobWorkflowRef + type: string + - jsonPath: .status.workflowRunId + name: WorkflowRunId + type: number + - jsonPath: .status.jobDisplayName + name: JobDisplayName + type: string + - jsonPath: .status.message + name: Message + type: 
string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: EphemeralRunner is the Schema for the ephemeralrunners API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: EphemeralRunnerSpec defines the desired state of EphemeralRunner + properties: + githubConfigSecret: + type: string + githubConfigUrl: + type: string + githubServerTLS: + properties: + certConfigMapRef: + description: Required + type: string + type: object + metadata: + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + proxy: + properties: + http: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + https: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + type: object + runnerScaleSetId: + type: integer + spec: + description: 'Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + activeDeadlineSeconds: + description: Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + type: boolean + containers: + description: List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. 
One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. 
This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. 
This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. 
Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". 
+ type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. 
+ properties: + nameservers: + description: A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: 'EnableServiceLinks indicates whether information about services should be injected into pod''s environment variables, matching the syntax of Docker links. Optional: Defaults to true.' + type: boolean + ephemeralContainers: + description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. 
+ items: + description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted." + properties: + args: + description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral containers. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.' 
+ properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. 
If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. 
All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + targetContainerName: + description: "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. \n The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined." + type: string + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. 
+ type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. + items: + description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: 'Use the host''s ipc namespace. Optional: Default to false.' + type: boolean + hostNetwork: + description: Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. + type: boolean + hostPID: + description: 'Use the host''s pid namespace. Optional: Default to false.' + type: boolean + hostUsers: + description: 'Use the host''s user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.' + type: boolean + hostname: + description: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. 
+ type: string + imagePullSecrets: + description: 'ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. 
RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. 
If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. 
You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + x-kubernetes-map-type: atomic + os: + description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
\n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" + properties: + name: + description: 'Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null' + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. 
If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md' + type: object + preemptionPolicy: + description: PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. + type: string + readinessGates: + description: 'If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates' + items: + description: PodReadinessGate contains the reference to a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + runtimeClassName: + description: 'RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class' + type: string + schedulerName: + description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. + type: string + securityContext: + description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' + properties: + fsGroup: + description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. 
+ items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: 'DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.' + type: string + serviceAccountName: + description: 'ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/' + type: string + setHostnameAsFQDN: + description: If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. + type: boolean + shareProcessNamespace: + description: 'Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.' + type: boolean + subdomain: + description: If specified, the fully qualified Pod hostname will be "...svc.". If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. 
The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. 
And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. 
\n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: 'List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes' + items: + description: Volume represents a named volume in a pod that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. 
If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". 
If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. \n Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. 
\n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. 
This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. 
+ type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with other supported volume types + properties: + configMap: + description: configMap information about the configMap data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional field specify whether the Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the mount point of the file to project the token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no group + type: string + readOnly: + description: readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system as configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + status: + description: EphemeralRunnerStatus defines the observed state of EphemeralRunner + properties: + failures: + additionalProperties: + type: boolean + type: object + jobDisplayName: + type: string + jobRepositoryName: + type: string + jobRequestId: + format: int64 + type: integer + jobWorkflowRef: + type: string + message: + type: string + phase: + description: "Phase describes phases where EphemeralRunner can be in. The underlying type is a PodPhase, but the meaning is more restrictive \n The PodFailed phase should be set only when EphemeralRunner fails to start after multiple retries. That signals that this EphemeralRunner won't work, and manual inspection is required \n The PodSucceded phase should be set only when confirmed that EphemeralRunner actually executed the job and has been removed from the service." + type: string + ready: + description: Turns true only if the runner is online. 
+ type: boolean + reason: + type: string + runnerId: + type: integer + runnerJITConfig: + type: string + runnerName: + type: string + workflowRunId: + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml new file mode 100644 index 0000000000..913aee5dc2 --- /dev/null +++ b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml @@ -0,0 +1,4206 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + name: ephemeralrunnersets.actions.github.com +spec: + group: actions.github.com + names: + kind: EphemeralRunnerSet + listKind: EphemeralRunnerSetList + plural: ephemeralrunnersets + singular: ephemeralrunnerset + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.replicas + name: DesiredReplicas + type: integer + - jsonPath: .status.currentReplicas + name: CurrentReplicas + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: EphemeralRunnerSet is the Schema for the ephemeralrunnersets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: EphemeralRunnerSetSpec defines the desired state of EphemeralRunnerSet + properties: + ephemeralRunnerSpec: + description: EphemeralRunnerSpec defines the desired state of EphemeralRunner + properties: + githubConfigSecret: + type: string + githubConfigUrl: + type: string + githubServerTLS: + properties: + certConfigMapRef: + description: Required + type: string + type: object + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + proxy: + properties: + http: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + https: + properties: + credentialSecretRef: + type: string + noProxy: + items: + type: string + type: array + url: + description: Required + type: string + type: object + type: object + runnerScaleSetId: + type: integer + spec: + description: 'Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + activeDeadlineSeconds: + description: Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. 
+ format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. 
+ items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + type: boolean + containers: + description: List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. 
The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. 
One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. 
This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. 
This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. 
Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". 
+ type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. 
+ properties: + nameservers: + description: A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: 'EnableServiceLinks indicates whether information about services should be injected into pod''s environment variables, matching the syntax of Docker links. Optional: Defaults to true.' + type: boolean + ephemeralContainers: + description: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. 
+ items: + description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted." + properties: + args: + description: 'Arguments to the entrypoint. The image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral containers. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.' 
+ properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. 
If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. 
All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + targetContainerName: + description: "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. \n The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined." + type: string + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. 
+ type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. + items: + description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: 'Use the host''s ipc namespace. Optional: Default to false.' + type: boolean + hostNetwork: + description: Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. + type: boolean + hostPID: + description: 'Use the host''s pid namespace. Optional: Default to false.' + type: boolean + hostUsers: + description: 'Use the host''s user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.' + type: boolean + hostname: + description: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. 
+ type: string + imagePullSecrets: + description: 'ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. 
RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. 
If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. 
You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + x-kubernetes-map-type: atomic + os: + description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
\n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" + properties: + name: + description: 'Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null' + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. 
If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md' + type: object + preemptionPolicy: + description: PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. + type: string + readinessGates: + description: 'If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates' + items: + description: PodReadinessGate contains the reference to a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + runtimeClassName: + description: 'RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class' + type: string + schedulerName: + description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. + type: string + securityContext: + description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' + properties: + fsGroup: + description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. 
The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. 
+ items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: 'DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.' + type: string + serviceAccountName: + description: 'ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/' + type: string + setHostnameAsFQDN: + description: If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. + type: boolean + shareProcessNamespace: + description: 'Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.' + type: boolean + subdomain: + description: If specified, the fully qualified Pod hostname will be "...svc.". If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. 
The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. 
And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. 
\n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' 
+ type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: 'List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes' + items: + description: Volume represents a named volume in a pod that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. 
If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". 
If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. \n Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. 
\n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. 
This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. 
+ type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with other supported volume types + properties: + configMap: + description: configMap information about the configMap data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional field specify whether the Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the mount point of the file to project the token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no group + type: string + readOnly: + description: readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system as configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + replicas: + description: Replicas is the number of desired EphemeralRunner resources in the k8s namespace. + type: integer + type: object + status: + description: EphemeralRunnerSetStatus defines the observed state of EphemeralRunnerSet + properties: + currentReplicas: + description: CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet. + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} + preserveUnknownFields: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/actions-runner-controller-2/templates/NOTES.txt b/charts/actions-runner-controller-2/templates/NOTES.txt new file mode 100644 index 0000000000..44448bda98 --- /dev/null +++ b/charts/actions-runner-controller-2/templates/NOTES.txt @@ -0,0 +1,3 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. diff --git a/charts/actions-runner-controller-2/templates/_helpers.tpl b/charts/actions-runner-controller-2/templates/_helpers.tpl new file mode 100644 index 0000000000..4b5ffaed0b --- /dev/null +++ b/charts/actions-runner-controller-2/templates/_helpers.tpl @@ -0,0 +1,97 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "actions-runner-controller-2.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "actions-runner-controller-2.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "actions-runner-controller-2.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "actions-runner-controller-2.labels" -}} +helm.sh/chart: {{ include "actions-runner-controller-2.chart" . }} +{{ include "actions-runner-controller-2.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/part-of: {{ .Chart.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- range $k, $v := .Values.labels }} +{{ $k }}: {{ $v }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "actions-runner-controller-2.selectorLabels" -}} +app.kubernetes.io/name: {{ include "actions-runner-controller-2.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "actions-runner-controller-2.serviceAccountName" -}} +{{- if eq .Values.serviceAccount.name "default"}} +{{- fail "serviceAccount.name cannot be set to 'default'" }} +{{- end }} +{{- if .Values.serviceAccount.create }} +{{- default (include "actions-runner-controller-2.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} + {{- if not .Values.serviceAccount.name }} +{{- fail "serviceAccount.name must be set if serviceAccount.create is false" }} + {{- else }} +{{- .Values.serviceAccount.name }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "actions-runner-controller-2.managerRoleName" -}} +{{- include "actions-runner-controller-2.fullname" . }}-manager-role +{{- end }} + +{{- define "actions-runner-controller-2.managerRoleBinding" -}} +{{- include "actions-runner-controller-2.fullname" . }}-manager-rolebinding +{{- end }} + +{{- define "actions-runner-controller-2.leaderElectionRoleName" -}} +{{- include "actions-runner-controller-2.fullname" . }}-leader-election-role +{{- end }} + +{{- define "actions-runner-controller-2.leaderElectionRoleBinding" -}} +{{- include "actions-runner-controller-2.fullname" . }}-leader-election-rolebinding +{{- end }} + +{{- define "actions-runner-controller-2.imagePullSecretsNames" -}} +{{- $names := list }} +{{- range $k, $v := . }} +{{- $names = append $names $v.name }} +{{- end }} +{{- $names | join ","}} +{{- end }} \ No newline at end of file diff --git a/charts/actions-runner-controller-2/templates/deployment.yaml b/charts/actions-runner-controller-2/templates/deployment.yaml new file mode 100644 index 0000000000..37185152bc --- /dev/null +++ b/charts/actions-runner-controller-2/templates/deployment.yaml @@ -0,0 +1,98 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "actions-runner-controller-2.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "actions-runner-controller-2.labels" . | nindent 4 }} +spec: + replicas: {{ default 1 .Values.replicaCount }} + selector: + matchLabels: + {{- include "actions-runner-controller-2.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: "manager" + {{- with .Values.podAnnotations }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + labels: + app.kubernetes.io/part-of: actions-runner-controller + app.kubernetes.io/component: controller-manager + app.kubernetes.io/version: {{ .Chart.Version }} + {{- include "actions-runner-controller-2.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "actions-runner-controller-2.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: "{{ . }}" + {{- end }} + containers: + - name: manager + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "--auto-scaling-runner-set-only" + {{- if gt (int (default 1 .Values.replicaCount)) 1 }} + - "--enable-leader-election" + - "--leader-election-id={{ include "actions-runner-controller-2.fullname" . }}" + {{- end }} + {{- with .Values.imagePullSecrets }} + - "--auto-scaler-image-pull-secrets={{ include "actions-runner-controller-2.imagePullSecretsNames" . }}" + {{- end }} + command: + - "/manager" + env: + - name: CONTROLLER_MANAGER_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: CONTROLLER_MANAGER_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- with .Values.env }} + {{- if kindIs "slice" .Values.env }} + {{- toYaml .Values.env | nindent 8 }} + {{- else }} + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . 
| nindent 12 }} + {{- end }} + volumeMounts: + - mountPath: /tmp + name: tmp + terminationGracePeriodSeconds: 10 + volumes: + - name: tmp + emptyDir: {} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/charts/actions-runner-controller-2/templates/leader_election_role.yaml b/charts/actions-runner-controller-2/templates/leader_election_role.yaml new file mode 100644 index 0000000000..72c18189a6 --- /dev/null +++ b/charts/actions-runner-controller-2/templates/leader_election_role.yaml @@ -0,0 +1,12 @@ +{{- if gt (int (default 1 .Values.replicaCount)) 1 -}} +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "actions-runner-controller-2.leaderElectionRoleName" . }} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +{{- end }} \ No newline at end of file diff --git a/charts/actions-runner-controller-2/templates/leader_election_role_binding.yaml b/charts/actions-runner-controller-2/templates/leader_election_role_binding.yaml new file mode 100644 index 0000000000..3ab4d9ee6d --- /dev/null +++ b/charts/actions-runner-controller-2/templates/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +{{- if gt (int (default 1 .Values.replicaCount)) 1 -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "actions-runner-controller-2.leaderElectionRoleBinding" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "actions-runner-controller-2.leaderElectionRoleName" . 
}} +subjects: +- kind: ServiceAccount + name: {{ include "actions-runner-controller-2.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/charts/actions-runner-controller-2/templates/manager_role.yaml b/charts/actions-runner-controller-2/templates/manager_role.yaml new file mode 100644 index 0000000000..6b68e603c8 --- /dev/null +++ b/charts/actions-runner-controller-2/templates/manager_role.yaml @@ -0,0 +1,250 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "actions-runner-controller-2.managerRoleName" . }} +rules: +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets/finalizers + verbs: + - update +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners/finalizers + verbs: + - update +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - 
actions.github.com + resources: + - ephemeralrunners/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces/status + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - watch + - update +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - create + - delete + - get + - update + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + verbs: + - create + - delete + - get + - update + - list + - watch +- apiGroups: + - "" + resources: + - pods/exec + verbs: + - create + - get +- apiGroups: + - "" + resources: + - pods/log + verbs: + - get + - list + - watch +- apiGroups: + - "batch" + resources: + - jobs + verbs: + - get + - list + - create + - delete diff --git a/charts/actions-runner-controller-2/templates/manager_role_binding.yaml b/charts/actions-runner-controller-2/templates/manager_role_binding.yaml new file mode 100644 index 0000000000..cf8d6696f6 --- /dev/null +++ b/charts/actions-runner-controller-2/templates/manager_role_binding.yaml @@ -0,0 +1,12 @@ 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "actions-runner-controller-2.managerRoleBinding" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "actions-runner-controller-2.managerRoleName" . }} +subjects: +- kind: ServiceAccount + name: {{ include "actions-runner-controller-2.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/charts/actions-runner-controller-2/templates/serviceaccount.yaml b/charts/actions-runner-controller-2/templates/serviceaccount.yaml new file mode 100644 index 0000000000..0032039322 --- /dev/null +++ b/charts/actions-runner-controller-2/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "actions-runner-controller-2.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "actions-runner-controller-2.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/actions-runner-controller-2/tests/template_test.go b/charts/actions-runner-controller-2/tests/template_test.go new file mode 100644 index 0000000000..6459fb0336 --- /dev/null +++ b/charts/actions-runner-controller-2/tests/template_test.go @@ -0,0 +1,508 @@ +package tests + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" +) + +func TestTemplate_CreateServiceAccount(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "serviceAccount.create": "true", + "serviceAccount.annotations.foo": "bar", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"}) + + var serviceAccount corev1.ServiceAccount + helm.UnmarshalK8SYaml(t, output, &serviceAccount) + + assert.Equal(t, namespaceName, serviceAccount.Namespace) + assert.Equal(t, "test-arc-actions-runner-controller-2", serviceAccount.Name) + assert.Equal(t, "bar", string(serviceAccount.Annotations["foo"])) +} + +func TestTemplate_CreateServiceAccount_OverwriteName(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + 
strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "serviceAccount.create": "true", + "serviceAccount.name": "overwritten-name", + "serviceAccount.annotations.foo": "bar", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"}) + + var serviceAccount corev1.ServiceAccount + helm.UnmarshalK8SYaml(t, output, &serviceAccount) + + assert.Equal(t, namespaceName, serviceAccount.Namespace) + assert.Equal(t, "overwritten-name", serviceAccount.Name) + assert.Equal(t, "bar", string(serviceAccount.Annotations["foo"])) +} + +func TestTemplate_CreateServiceAccount_CannotUseDefaultServiceAccount(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "serviceAccount.create": "true", + "serviceAccount.name": "default", + "serviceAccount.annotations.foo": "bar", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"}) + assert.ErrorContains(t, err, "serviceAccount.name cannot be set to 'default'", "We should get an error because the default service account cannot be used") +} + +func TestTemplate_NotCreateServiceAccount(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "serviceAccount.create": "false", + 
"serviceAccount.name": "overwritten-name", + "serviceAccount.annotations.foo": "bar", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"}) + assert.ErrorContains(t, err, "could not find template templates/serviceaccount.yaml in chart", "We should get an error because the template should be skipped") +} + +func TestTemplate_NotCreateServiceAccount_ServiceAccountNotSet(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "serviceAccount.create": "false", + "serviceAccount.annotations.foo": "bar", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"}) + assert.ErrorContains(t, err, "serviceAccount.name must be set if serviceAccount.create is false", "We should get an error because the default service account cannot be used") +} + +func TestTemplate_CreateManagerRole(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"}) + + var managerRole rbacv1.ClusterRole + helm.UnmarshalK8SYaml(t, output, &managerRole) + + assert.Empty(t, managerRole.Namespace, "ClusterRole should not have a 
namespace") + assert.Equal(t, "test-arc-actions-runner-controller-2-manager-role", managerRole.Name) + assert.Equal(t, 25, len(managerRole.Rules)) +} + +func TestTemplate_ManagerRoleBinding(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "serviceAccount.create": "true", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"}) + + var managerRoleBinding rbacv1.ClusterRoleBinding + helm.UnmarshalK8SYaml(t, output, &managerRoleBinding) + + assert.Empty(t, managerRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace") + assert.Equal(t, "test-arc-actions-runner-controller-2-manager-rolebinding", managerRoleBinding.Name) + assert.Equal(t, "test-arc-actions-runner-controller-2-manager-role", managerRoleBinding.RoleRef.Name) + assert.Equal(t, "test-arc-actions-runner-controller-2", managerRoleBinding.Subjects[0].Name) + assert.Equal(t, namespaceName, managerRoleBinding.Subjects[0].Namespace) +} + +func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "image.tag": "dev", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"}) + + var deployment appsv1.Deployment + helm.UnmarshalK8SYaml(t, 
output, &deployment) + + assert.Equal(t, namespaceName, deployment.Namespace) + assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Name) + assert.Equal(t, "actions-runner-controller-2-0.1.0", deployment.Labels["helm.sh/chart"]) + assert.Equal(t, "actions-runner-controller-2", deployment.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) + assert.Equal(t, "preview", deployment.Labels["app.kubernetes.io/version"]) + assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) + + assert.Equal(t, int32(1), *deployment.Spec.Replicas) + + assert.Equal(t, "actions-runner-controller-2", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) + assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"]) + + assert.Equal(t, "actions-runner-controller-2", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"]) + + assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"]) + + assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0) + assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Spec.Template.Spec.ServiceAccountName) + assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext) + assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName) + assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds) + assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1) + assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name) + assert.NotNil(t, 10, deployment.Spec.Template.Spec.Volumes[0].EmptyDir) + + assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0) + assert.Nil(t, deployment.Spec.Template.Spec.Affinity) + assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0) + + assert.Len(t, 
deployment.Spec.Template.Spec.Containers, 1) + assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) + assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 1) + assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2) + assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) + assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath) + + assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) + assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) + + assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources) + assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1) + assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name) + assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath) +} + +func TestTemplate_ControllerDeployment_Customize(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: 
map[string]string{ + "labels.foo": "bar", + "labels.github": "actions", + "replicaCount": "1", + "image.pullPolicy": "Always", + "image.tag": "dev", + "imagePullSecrets[0].name": "dockerhub", + "nameOverride": "actions-runner-controller-2-override", + "fullnameOverride": "actions-runner-controller-2-fullname-override", + "serviceAccount.name": "actions-runner-controller-2-sa", + "podAnnotations.foo": "bar", + "podSecurityContext.fsGroup": "1000", + "securityContext.runAsUser": "1000", + "securityContext.runAsNonRoot": "true", + "resources.limits.cpu": "500m", + "nodeSelector.foo": "bar", + "tolerations[0].key": "foo", + "affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo", + "affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar", + "priorityClassName": "test-priority-class", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"}) + + var deployment appsv1.Deployment + helm.UnmarshalK8SYaml(t, output, &deployment) + + assert.Equal(t, namespaceName, deployment.Namespace) + assert.Equal(t, "actions-runner-controller-2-fullname-override", deployment.Name) + assert.Equal(t, "actions-runner-controller-2-0.1.0", deployment.Labels["helm.sh/chart"]) + assert.Equal(t, "actions-runner-controller-2-override", deployment.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) + assert.Equal(t, "preview", deployment.Labels["app.kubernetes.io/version"]) + assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) + assert.Equal(t, "bar", deployment.Labels["foo"]) + assert.Equal(t, "actions", deployment.Labels["github"]) + + assert.Equal(t, int32(1), *deployment.Spec.Replicas) + + assert.Equal(t, "actions-runner-controller-2-override", 
deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) + assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"]) + + assert.Equal(t, "actions-runner-controller-2-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"]) + + assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"]) + assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"]) + + assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1) + assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name) + assert.Equal(t, "actions-runner-controller-2-sa", deployment.Spec.Template.Spec.ServiceAccountName) + assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup) + assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName) + assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds) + assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1) + assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name) + assert.NotNil(t, 10, deployment.Spec.Template.Spec.Volumes[0].EmptyDir) + + assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 1) + assert.Equal(t, "bar", deployment.Spec.Template.Spec.NodeSelector["foo"]) + + assert.NotNil(t, deployment.Spec.Template.Spec.Affinity.NodeAffinity) + assert.Equal(t, "foo", deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key) + assert.Equal(t, "bar", string(deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Operator)) + + assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1) + assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key) + + 
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) + assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, corev1.PullAlways, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) + assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2) + assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) + assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1]) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2) + assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) + assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath) + + assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) + assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) + + assert.Equal(t, "500m", deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String()) + assert.True(t, *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot) + assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1) + assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name) + assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath) +} + +func TestTemplate_EnableLeaderElectionRole(t *testing.T) { + t.Parallel() + + // Path to 
the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "replicaCount": "2", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/leader_election_role.yaml"}) + + var leaderRole rbacv1.Role + helm.UnmarshalK8SYaml(t, output, &leaderRole) + + assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-role", leaderRole.Name) + assert.Equal(t, namespaceName, leaderRole.Namespace) +} + +func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "replicaCount": "2", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/leader_election_role_binding.yaml"}) + + var leaderRoleBinding rbacv1.RoleBinding + helm.UnmarshalK8SYaml(t, output, &leaderRoleBinding) + + assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-rolebinding", leaderRoleBinding.Name) + assert.Equal(t, namespaceName, leaderRoleBinding.Namespace) + assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-role", leaderRoleBinding.RoleRef.Name) + assert.Equal(t, "test-arc-actions-runner-controller-2", leaderRoleBinding.Subjects[0].Name) +} + +func TestTemplate_EnableLeaderElection(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := 
filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "replicaCount": "2", + "image.tag": "dev", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"}) + + var deployment appsv1.Deployment + helm.UnmarshalK8SYaml(t, output, &deployment) + + assert.Equal(t, namespaceName, deployment.Namespace) + assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Name) + + assert.Equal(t, int32(2), *deployment.Spec.Replicas) + + assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) + assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) + assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3) + assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) + assert.Equal(t, "--enable-leader-election", deployment.Spec.Template.Spec.Containers[0].Args[1]) + assert.Equal(t, "--leader-election-id=test-arc-actions-runner-controller-2", deployment.Spec.Template.Spec.Containers[0].Args[2]) +} + +func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + 
strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "imagePullSecrets[0].name": "dockerhub", + "imagePullSecrets[1].name": "ghcr", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"}) + + var deployment appsv1.Deployment + helm.UnmarshalK8SYaml(t, output, &deployment) + + assert.Equal(t, namespaceName, deployment.Namespace) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2) + assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) + assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1]) +} diff --git a/charts/actions-runner-controller-2/values.yaml b/charts/actions-runner-controller-2/values.yaml new file mode 100644 index 0000000000..cc139655e1 --- /dev/null +++ b/charts/actions-runner-controller-2/values.yaml @@ -0,0 +1,65 @@ +# Default values for actions-runner-controller-2. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +labels: {} + +# leaderElection will be enabled when replicaCount>1, +# So, only one replica will in charge of reconciliation at a given time +# leaderElectionId will be set to {{ define actions-runner-controller-2.fullname }}. +replicaCount: 1 + +image: + repository: "ghcr.io/actions/actions-runner-controller-2" + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created for running the controller pod + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + # You can not use the default service account for this. + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# Leverage a PriorityClass to ensure your pods survive resource shortages +# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# PriorityClass: system-cluster-critical +priorityClassName: "" \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/.helmignore b/charts/auto-scaling-runner-set/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/charts/auto-scaling-runner-set/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/auto-scaling-runner-set/Chart.yaml b/charts/auto-scaling-runner-set/Chart.yaml new file mode 100644 index 0000000000..6a41e511ad --- /dev/null +++ b/charts/auto-scaling-runner-set/Chart.yaml @@ -0,0 +1,33 @@ +apiVersion: v2 +name: auto-scaling-runner-set +description: A Helm chart for deploying an AutoScalingRunnerSet + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.1.0" + +home: https://github.com/actions/dev-arc + +sources: + - "https://github.com/actions/dev-arc" + +maintainers: + - name: actions + url: https://github.com/actions \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/ci/ci-values.yaml b/charts/auto-scaling-runner-set/ci/ci-values.yaml new file mode 100644 index 0000000000..3497fcab8c --- /dev/null +++ b/charts/auto-scaling-runner-set/ci/ci-values.yaml @@ -0,0 +1,6 @@ +# Set the following to dummy values. +# This is only useful in CI +githubConfigUrl: https://github.com/actions/actions-runner-controller + +githubConfigSecret: + github_token: test \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/templates/NOTES.txt b/charts/auto-scaling-runner-set/templates/NOTES.txt new file mode 100644 index 0000000000..19547d0db7 --- /dev/null +++ b/charts/auto-scaling-runner-set/templates/NOTES.txt @@ -0,0 +1,3 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/templates/_helpers.tpl b/charts/auto-scaling-runner-set/templates/_helpers.tpl new file mode 100644 index 0000000000..13889c0a1c --- /dev/null +++ b/charts/auto-scaling-runner-set/templates/_helpers.tpl @@ -0,0 +1,313 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "auto-scaling-runner-set.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "auto-scaling-runner-set.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "auto-scaling-runner-set.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "auto-scaling-runner-set.labels" -}} +helm.sh/chart: {{ include "auto-scaling-runner-set.chart" . }} +{{ include "auto-scaling-runner-set.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "auto-scaling-runner-set.selectorLabels" -}} +app.kubernetes.io/name: {{ include "auto-scaling-runner-set.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{- define "auto-scaling-runner-set.githubsecret" -}} +{{- include "auto-scaling-runner-set.fullname" . }}-github-secret +{{- end }} + +{{- define "auto-scaling-runner-set.noPermissionServiceAccountName" -}} +{{- include "auto-scaling-runner-set.fullname" . }}-no-permission-service-account +{{- end }} + +{{- define "auto-scaling-runner-set.kubeModeRoleName" -}} +{{- include "auto-scaling-runner-set.fullname" . }}-kube-mode-role +{{- end }} + +{{- define "auto-scaling-runner-set.kubeModeServiceAccountName" -}} +{{- include "auto-scaling-runner-set.fullname" . 
}}-kube-mode-service-account +{{- end }} + +{{- define "auto-scaling-runner-set.dind-init-container" -}} +{{- range $i, $val := .Values.template.spec.containers -}} +{{- if eq $val.name "runner" -}} +image: {{ $val.image }} +{{- if $val.imagePullSecrets }} +imagePullSecrets: + {{ $val.imagePullSecrets | toYaml -}} +{{- end }} +command: ["cp"] +args: ["-r", "-v", "/actions-runner/externals/.", "/actions-runner/tmpDir/"] +volumeMounts: + - name: dind-externals + mountPath: /actions-runner/tmpDir +{{- end }} +{{- end }} +{{- end }} + +{{- define "auto-scaling-runner-set.dind-container" -}} +image: docker:dind +securityContext: + privileged: true +volumeMounts: + - name: work + mountPath: /actions-runner/_work + - name: dind-cert + mountPath: /certs/client + - name: dind-externals + mountPath: /actions-runner/externals +{{- end }} + +{{- define "auto-scaling-runner-set.dind-volume" -}} +- name: dind-cert + emptyDir: {} +- name: dind-externals + emptyDir: {} +{{- end }} + +{{- define "auto-scaling-runner-set.dind-work-volume" -}} +{{- $createWorkVolume := 1 }} + {{- range $i, $volume := .Values.template.spec.volumes }} + {{- if eq $volume.name "work" }} + {{- $createWorkVolume = 0 -}} +- name: work + {{- range $key, $val := $volume }} + {{- if ne $key "name" }} + {{ $key }}: {{ $val }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if eq $createWorkVolume 1 }} +- name: work + emptyDir: {} + {{- end }} +{{- end }} + +{{- define "auto-scaling-runner-set.kubernetes-mode-work-volume" -}} +{{- $createWorkVolume := 1 }} + {{- range $i, $volume := .Values.template.spec.volumes }} + {{- if eq $volume.name "work" }} + {{- $createWorkVolume = 0 -}} +- name: work + {{- range $key, $val := $volume }} + {{- if ne $key "name" }} + {{ $key }}: {{ $val }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if eq $createWorkVolume 1 }} +- name: work + ephemeral: + volumeClaimTemplate: + spec: + {{- .Values.containerMode.kubernetesModeWorkVolumeClaim | toYaml | nindent 8 
}} + {{- end }} +{{- end }} + +{{- define "auto-scaling-runner-set.non-work-volumes" -}} + {{- range $i, $volume := .Values.template.spec.volumes }} + {{- if ne $volume.name "work" }} +- name: {{ $volume.name }} + {{- range $key, $val := $volume }} + {{- if ne $key "name" }} + {{ $key }}: {{ $val }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "auto-scaling-runner-set.non-runner-containers" -}} + {{- range $i, $container := .Values.template.spec.containers -}} + {{- if ne $container.name "runner" -}} +- name: {{ $container.name }} + {{- range $key, $val := $container }} + {{- if ne $key "name" }} + {{ $key }}: {{ $val }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "auto-scaling-runner-set.dind-runner-container" -}} +{{- range $i, $container := .Values.template.spec.containers -}} + {{- if eq $container.name "runner" -}} + {{- range $key, $val := $container }} + {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }} +{{ $key }}: {{ $val }} + {{- end }} + {{- end }} + {{- $setDockerHost := 1 }} + {{- $setDockerTlsVerify := 1 }} + {{- $setDockerCertPath := 1 }} +env: + {{- with $container.env }} + {{- range $i, $env := . 
}} + {{- if eq $env.name "DOCKER_HOST" }} + {{- $setDockerHost = 0 -}} + {{- end }} + {{- if eq $env.name "DOCKER_TLS_VERIFY" }} + {{- $setDockerTlsVerify = 0 -}} + {{- end }} + {{- if eq $env.name "DOCKER_CERT_PATH" }} + {{- $setDockerCertPath = 0 -}} + {{- end }} + - name: {{ $env.name }} + {{- range $envKey, $envVal := $env }} + {{- if ne $envKey "name" }} + {{ $envKey }}: {{ $envVal | toYaml | nindent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- if $setDockerHost }} + - name: DOCKER_HOST + value: tcp://localhost:2376 + {{- end }} + {{- if $setDockerTlsVerify }} + - name: DOCKER_TLS_VERIFY + value: "1" + {{- end }} + {{- if $setDockerCertPath }} + - name: DOCKER_CERT_PATH + value: /certs/client + {{- end }} + {{- end }} + {{- $mountWork := 1 }} + {{- $mountDindCert := 1 }} +volumeMounts: + {{- with $container.volumeMounts }} + {{- range $i, $volMount := . }} + {{- if eq $volMount.name "work" }} + {{- $mountWork = 0 -}} + {{- end }} + {{- if eq $volMount.name "dind-cert" }} + {{- $mountDindCert = 0 -}} + {{- end }} + - name: {{ $volMount.name }} + {{- range $mountKey, $mountVal := $volMount }} + {{- if ne $mountKey "name" }} + {{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $mountWork }} + - name: work + mountPath: /actions-runner/_work + {{- end }} + {{- if $mountDindCert }} + - name: dind-cert + mountPath: /certs/client + {{- end }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "auto-scaling-runner-set.kubernetes-mode-runner-container" -}} +{{- range $i, $container := .Values.template.spec.containers -}} + {{- if eq $container.name "runner" -}} + {{- range $key, $val := $container }} + {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }} +{{ $key }}: {{ $val }} + {{- end }} + {{- end }} + {{- $setContainerHooks := 1 }} + {{- $setPodName := 1 }} + {{- $setRequireJobContainer := 1 }} +env: + {{- with $container.env }} + {{- range $i, $env := . 
}} + {{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }} + {{- $setContainerHooks = 0 -}} + {{- end }} + {{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }} + {{- $setPodName = 0 -}} + {{- end }} + {{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }} + {{- $setRequireJobContainer = 0 -}} + {{- end }} + - name: {{ $env.name }} + {{- range $envKey, $envVal := $env }} + {{- if ne $envKey "name" }} + {{ $envKey }}: {{ $envVal | toYaml | nindent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $setContainerHooks }} + - name: ACTIONS_RUNNER_CONTAINER_HOOKS + value: /actions-runner/k8s/index.js + {{- end }} + {{- if $setPodName }} + - name: ACTIONS_RUNNER_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- end }} + {{- if $setRequireJobContainer }} + - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER + value: "true" + {{- end }} + {{- $mountWork := 1 }} +volumeMounts: + {{- with $container.volumeMounts }} + {{- range $i, $volMount := . }} + {{- if eq $volMount.name "work" }} + {{- $mountWork = 0 -}} + {{- end }} + - name: {{ $volMount.name }} + {{- range $mountKey, $mountVal := $volMount }} + {{- if ne $mountKey "name" }} + {{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $mountWork }} + - name: work + mountPath: /actions-runner/_work + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml b/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml new file mode 100644 index 0000000000..7794d4b321 --- /dev/null +++ b/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml @@ -0,0 +1,91 @@ +apiVersion: actions.github.com/v1alpha1 +kind: AutoscalingRunnerSet +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "auto-scaling-runner-set.labels" . 
| nindent 4 }} +spec: + githubConfigUrl: {{ required ".Values.githubConfigUrl is required" .Values.githubConfigUrl }} + githubConfigSecret: {{ include "auto-scaling-runner-set.githubsecret" . }} + {{- with .Values.runnerGroup }} + runnerGroup: {{ . }} + {{- end }} + + {{- if and (kindIs "int64" .Values.minRunners) (kindIs "int64" .Values.maxRunners) }} + {{- if gt .Values.minRunners .Values.maxRunners }} + {{- fail "maxRunners has to be greater or equal to minRunners" }} + {{- end }} + {{- end }} + + {{- if kindIs "int64" .Values.maxRunners }} + {{- if lt .Values.maxRunners 0 }} + {{- fail "maxRunners has to be greater or equal to 0" }} + {{- end }} + maxRunners: {{ .Values.maxRunners | int }} + {{- end }} + + {{- if kindIs "int64" .Values.minRunners }} + {{- if lt .Values.minRunners 0 }} + {{- fail "minRunners has to be greater or equal to 0" }} + {{- end }} + minRunners: {{ .Values.minRunners | int }} + {{- end }} + + template: + {{- with .Values.template.metadata }} + metadata: + {{- with .labels }} + labels: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- range $key, $val := .Values.template.spec }} + {{- if and (ne $key "containers") (ne $key "volumes") (ne $key "initContainers") (ne $key "serviceAccountName") }} + {{ $key }}: {{ $val | toYaml | nindent 8 }} + {{- end }} + {{- end }} + {{- if eq .Values.containerMode.type "kubernetes" }} + serviceAccountName: {{ default (include "auto-scaling-runner-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }} + {{- else }} + serviceAccountName: {{ default (include "auto-scaling-runner-set.noPermissionServiceAccountName" .) 
.Values.template.spec.serviceAccountName }} + {{- end }} + {{- if or .Values.template.spec.initContainers (eq .Values.containerMode.type "dind") }} + initContainers: + {{- if eq .Values.containerMode.type "dind" }} + - name: init-dind-externals + {{- include "auto-scaling-runner-set.dind-init-container" . | nindent 8 }} + {{- end }} + {{- with .Values.template.spec.initContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + containers: + {{- if eq .Values.containerMode.type "dind" }} + - name: runner + {{- include "auto-scaling-runner-set.dind-runner-container" . | nindent 8 }} + - name: dind + {{- include "auto-scaling-runner-set.dind-container" . | nindent 8 }} + {{- include "auto-scaling-runner-set.non-runner-containers" . | nindent 6 }} + {{- else if eq .Values.containerMode.type "kubernetes" }} + - name: runner + {{- include "auto-scaling-runner-set.kubernetes-mode-runner-container" . | nindent 8 }} + {{- include "auto-scaling-runner-set.non-runner-containers" . | nindent 6 }} + {{- else }} + {{ .Values.template.spec.containers | toYaml | nindent 6 }} + {{- end }} + {{- if or .Values.template.spec.volumes (eq .Values.containerMode.type "dind") (eq .Values.containerMode.type "kubernetes") }} + volumes: + {{- if eq .Values.containerMode.type "dind" }} + {{- include "auto-scaling-runner-set.dind-volume" . | nindent 6 }} + {{- include "auto-scaling-runner-set.dind-work-volume" . | nindent 6 }} + {{- else if eq .Values.containerMode.type "kubernetes" }} + {{- include "auto-scaling-runner-set.kubernetes-mode-work-volume" . | nindent 6 }} + {{- end }} + {{- include "auto-scaling-runner-set.non-work-volumes" . 
| nindent 6 }} + {{- end }} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/templates/githubsecret.yaml b/charts/auto-scaling-runner-set/templates/githubsecret.yaml new file mode 100644 index 0000000000..73e84a7a8d --- /dev/null +++ b/charts/auto-scaling-runner-set/templates/githubsecret.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "auto-scaling-runner-set.githubsecret" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "auto-scaling-runner-set.labels" . | nindent 4 }} + finalizers: + - actions.github.com/secret-protection +data: + {{- $hasToken := false }} + {{- $hasAppId := false }} + {{- $hasInstallationId := false }} + {{- $hasPrivateKey := false }} + {{- range $secretName, $secretValue := (required "Values.githubConfigSecret is required for setting auth with GitHub server." .Values.githubConfigSecret) }} + {{- if $secretValue }} + {{ $secretName }}: {{ $secretValue | toString | b64enc }} + {{- if eq $secretName "github_token" }} + {{- $hasToken = true }} + {{- end }} + {{- if eq $secretName "github_app_id" }} + {{- $hasAppId = true }} + {{- end }} + {{- if eq $secretName "github_app_installation_id" }} + {{- $hasInstallationId = true }} + {{- end }} + {{- if eq $secretName "github_app_private_key" }} + {{- $hasPrivateKey = true }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (not $hasToken) (not ($hasAppId)) }} + {{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_token or .Values.githubConfigSecret.github_app_id." }} + {{- end }} + {{- if and $hasAppId (or (not $hasInstallationId) (not $hasPrivateKey)) }} + {{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key." 
}} + {{- end }} diff --git a/charts/auto-scaling-runner-set/templates/kube_mode_role.yaml b/charts/auto-scaling-runner-set/templates/kube_mode_role.yaml new file mode 100644 index 0000000000..a12d02d585 --- /dev/null +++ b/charts/auto-scaling-runner-set/templates/kube_mode_role.yaml @@ -0,0 +1,24 @@ +{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} +# default permission for runner pod service account in kubernetes mode (container hook) +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "auto-scaling-runner-set.kubeModeRoleName" . }} + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "create", "delete"] +- apiGroups: [""] + resources: ["pods/exec"] + verbs: ["get", "create"] +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get", "list", "watch",] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["get", "list", "create", "delete"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "create", "delete"] +{{- end }} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/templates/kube_mode_role_binding.yaml b/charts/auto-scaling-runner-set/templates/kube_mode_role_binding.yaml new file mode 100644 index 0000000000..eafdda5e73 --- /dev/null +++ b/charts/auto-scaling-runner-set/templates/kube_mode_role_binding.yaml @@ -0,0 +1,15 @@ +{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "auto-scaling-runner-set.kubeModeRoleName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "auto-scaling-runner-set.kubeModeRoleName" . }} +subjects: +- kind: ServiceAccount + name: {{ include "auto-scaling-runner-set.kubeModeServiceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/templates/kube_mode_serviceaccount.yaml b/charts/auto-scaling-runner-set/templates/kube_mode_serviceaccount.yaml new file mode 100644 index 0000000000..60dc0a97c9 --- /dev/null +++ b/charts/auto-scaling-runner-set/templates/kube_mode_serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "auto-scaling-runner-set.kubeModeServiceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "auto-scaling-runner-set.labels" . | nindent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/templates/no_permission_serviceaccount.yaml b/charts/auto-scaling-runner-set/templates/no_permission_serviceaccount.yaml new file mode 100644 index 0000000000..07608bc9cf --- /dev/null +++ b/charts/auto-scaling-runner-set/templates/no_permission_serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if and (ne .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "auto-scaling-runner-set.noPermissionServiceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "auto-scaling-runner-set.labels" . 
| nindent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/tests/template_test.go b/charts/auto-scaling-runner-set/tests/template_test.go new file mode 100644 index 0000000000..51a8840a10 --- /dev/null +++ b/charts/auto-scaling-runner-set/tests/template_test.go @@ -0,0 +1,606 @@ +package tests + +import ( + "path/filepath" + "strings" + "testing" + + v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/random" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" +) + +func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"}) + + var githubSecret corev1.Secret + helm.UnmarshalK8SYaml(t, output, &githubSecret) + + assert.Equal(t, namespaceName, githubSecret.Namespace) + assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", githubSecret.Name) + assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"])) + assert.Equal(t, "actions.github.com/secret-protection", githubSecret.Finalizers[0]) +} + +func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + 
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_app_id": "10", + "githubConfigSecret.github_app_installation_id": "100", + "githubConfigSecret.github_app_private_key": "private_key", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"}) + + var githubSecret corev1.Secret + helm.UnmarshalK8SYaml(t, output, &githubSecret) + + assert.Equal(t, namespaceName, githubSecret.Namespace) + assert.Equal(t, "10", string(githubSecret.Data["github_app_id"])) + assert.Equal(t, "100", string(githubSecret.Data["github_app_installation_id"])) + assert.Equal(t, "private_key", string(githubSecret.Data["github_app_private_key"])) +} + +func TestTemplateRenderedGitHubSecretErrorWithMissingAuthInput(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_app_id": "", + "githubConfigSecret.github_token": "", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"}) + require.Error(t, err) + + assert.ErrorContains(t, err, "provide .Values.githubConfigSecret.github_token or .Values.githubConfigSecret.github_app_id") +} + +func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) { + 
t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_app_id": "10", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"}) + require.Error(t, err) + + assert.ErrorContains(t, err, "provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key") +} + +func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/no_permission_serviceaccount.yaml"}) + var serviceAccount corev1.ServiceAccount + helm.UnmarshalK8SYaml(t, output, &serviceAccount) + + assert.Equal(t, namespaceName, serviceAccount.Namespace) + assert.Equal(t, "test-runners-auto-scaling-runner-set-no-permission-service-account", serviceAccount.Name) + + output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, 
"test-runners-auto-scaling-runner-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName) +} + +func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "containerMode.type": "kubernetes", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"}) + var serviceAccount corev1.ServiceAccount + helm.UnmarshalK8SYaml(t, output, &serviceAccount) + + assert.Equal(t, namespaceName, serviceAccount.Namespace) + assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-service-account", serviceAccount.Name) + + output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"}) + var role rbacv1.Role + helm.UnmarshalK8SYaml(t, output, &role) + + assert.Equal(t, namespaceName, role.Namespace) + assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-role", role.Name) + assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules") + assert.Equal(t, "pods", role.Rules[0].Resources[0]) + assert.Equal(t, "pods/exec", role.Rules[1].Resources[0]) + assert.Equal(t, "pods/log", role.Rules[2].Resources[0]) + assert.Equal(t, "jobs", role.Rules[3].Resources[0]) + assert.Equal(t, "secrets", role.Rules[4].Resources[0]) + + output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role_binding.yaml"}) + var roleBinding rbacv1.RoleBinding + helm.UnmarshalK8SYaml(t, output, &roleBinding) + + 
assert.Equal(t, namespaceName, roleBinding.Namespace) + assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-role", roleBinding.Name) + assert.Len(t, roleBinding.Subjects, 1) + assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-service-account", roleBinding.Subjects[0].Name) + assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace) + assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-role", roleBinding.RoleRef.Name) + assert.Equal(t, "Role", roleBinding.RoleRef.Kind) + + output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName) +} + +func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "template.spec.serviceAccountName": "test-service-account", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/no_permission_serviceaccount.yaml"}) + assert.ErrorContains(t, err, "could not find template templates/no_permission_serviceaccount.yaml in chart", "no permission service account should not be rendered") + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + 
assert.Equal(t, "test-service-account", ars.Spec.Template.Spec.ServiceAccountName) +} + +func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, namespaceName, ars.Namespace) + assert.Equal(t, "test-runners", ars.Name) + + assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) + assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) + assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", ars.Spec.GitHubConfigSecret) + + assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") + + assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil") + assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil") + assert.Nil(t, ars.Spec.Proxy, "Proxy should be nil") + assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil") + + assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil") + + assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container") + assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image) +} + +func 
TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "template.metadata.labels.test1": "test1", + "template.metadata.labels.test2": "test2", + "template.metadata.annotations.test3": "test3", + "template.metadata.annotations.test4": "test4", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, namespaceName, ars.Namespace) + assert.Equal(t, "test-runners", ars.Name) + + assert.NotNil(t, ars.Spec.Template.Labels, "Template.Spec.Labels should not be nil") + assert.Equal(t, "test1", ars.Spec.Template.Labels["test1"], "Template.Spec.Labels should have test1") + assert.Equal(t, "test2", ars.Spec.Template.Labels["test2"], "Template.Spec.Labels should have test2") + + assert.NotNil(t, ars.Spec.Template.Annotations, "Template.Spec.Annotations should not be nil") + assert.Equal(t, "test3", ars.Spec.Template.Annotations["test3"], "Template.Spec.Annotations should have test3") + assert.Equal(t, "test4", ars.Spec.Template.Annotations["test4"], "Template.Spec.Annotations should have test4") + + assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil") + + assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container") + assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "ghcr.io/actions/actions-runner:latest", 
ars.Spec.Template.Spec.Containers[0].Image) +} + +func TestTemplateRenderedAutoScalingRunnerSet_MaxRunnersValidationError(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "maxRunners": "-1", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + require.Error(t, err) + + assert.ErrorContains(t, err, "maxRunners has to be greater or equal to 0") +} + +func TestTemplateRenderedAutoScalingRunnerSet_MinRunnersValidationError(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "maxRunners": "1", + "minRunners": "-1", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + require.Error(t, err) + + assert.ErrorContains(t, err, "minRunners has to be greater or equal to 0") +} + +func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationError(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName 
:= "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "maxRunners": "0", + "minRunners": "1", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + require.Error(t, err) + + assert.ErrorContains(t, err, "maxRunners has to be greater or equal to minRunners") +} + +func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationSameValue(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "maxRunners": "0", + "minRunners": "0", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, 0, *ars.Spec.MinRunners, "MinRunners should be 0") + assert.Equal(t, 0, *ars.Spec.MaxRunners, "MaxRunners should be 0") +} + +func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMin(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + 
"githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "minRunners": "5", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, 5, *ars.Spec.MinRunners, "MinRunners should be 5") + assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil") +} + +func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "maxRunners": "5", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, 5, *ars.Spec.MaxRunners, "MaxRunners should be 5") + assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil") +} + +func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + 
"containerMode.type": "dind", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, namespaceName, ars.Namespace) + assert.Equal(t, "test-runners", ars.Name) + + assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) + assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) + assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", ars.Spec.GitHubConfigSecret) + + assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") + + assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil") + assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil") + assert.Nil(t, ars.Spec.Proxy, "Proxy should be nil") + assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil") + + assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil") + + assert.Len(t, ars.Spec.Template.Spec.InitContainers, 1, "Template.Spec should have 1 init container") + assert.Equal(t, "init-dind-externals", ars.Spec.Template.Spec.InitContainers[0].Name) + assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.InitContainers[0].Image) + assert.Equal(t, "cp", ars.Spec.Template.Spec.InitContainers[0].Command[0]) + assert.Equal(t, "-r -v /actions-runner/externals/. 
/actions-runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " ")) + + assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 container") + assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image) + + assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name) + assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image) +} + +func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "containerMode.type": "kubernetes", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, namespaceName, ars.Namespace) + assert.Equal(t, "test-runners", ars.Name) + + assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) + assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) + assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", ars.Spec.GitHubConfigSecret) + + assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") + assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil") + assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil") + assert.Nil(t, 
ars.Spec.Proxy, "Proxy should be nil") + assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil") + + assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil") + + assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container") + assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image) + + assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name) + assert.Equal(t, "/actions-runner/k8s/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value) + assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name) + assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[2].Name) + assert.Equal(t, "true", ars.Spec.Template.Spec.Containers[0].Env[2].Value) + + assert.Len(t, ars.Spec.Template.Spec.Volumes, 1, "Template.Spec should have 1 volume") + assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[0].Name) + assert.NotNil(t, ars.Spec.Template.Spec.Volumes[0].Ephemeral, "Template.Spec should have 1 ephemeral volume") +} diff --git a/charts/auto-scaling-runner-set/values.yaml b/charts/auto-scaling-runner-set/values.yaml new file mode 100644 index 0000000000..2ee11db6c2 --- /dev/null +++ b/charts/auto-scaling-runner-set/values.yaml @@ -0,0 +1,117 @@ +## githubConfigUrl is the GitHub url for where you want to configure runners +## ex: https://github.com/myorg/myrepo or https://github.com/myorg +githubConfigUrl: "" + +## githubConfigSecret is the k8s secrets to use when auth with GitHub API. 
+## You can choose to use GitHub App or a PAT token +githubConfigSecret: + ### GitHub Apps Configuration + ## NOTE: IDs MUST be strings, use quotes + #github_app_id: "" + #github_app_installation_id: "" + #github_app_private_key: | + + ### GitHub PAT Configuration + github_token: "" + +## maxRunners is the max number of runners the auto scaling runner set will scale up to. +# maxRunners: 5 + +## minRunners is the min number of runners the auto scaling runner set will scale down to. +# minRunners: 0 + +# runnerGroup: "default" + +## template is the PodSpec for each runner Pod +template: + spec: + containers: + - name: runner + image: ghcr.io/actions/actions-runner:latest + command: ["/actions-runner/run.sh"] + +containerMode: + type: "" ## type can be set to dind or kubernetes + ## with containerMode.type=dind, we will populate the template.spec with following pod spec + ## template: + ## spec: + ## initContainers: + ## - name: initExternalsInternalVolume + ## image: ghcr.io/actions/actions-runner:latest + ## command: ["cp", "-r", "-v", "/actions-runner/externals/.", "/actions-runner/tmpDir/"] + ## volumeMounts: + ## - name: externalsInternal + ## mountPath: /actions-runner/tmpDir + ## containers: + ## - name: runner + ## image: ghcr.io/actions/actions-runner:latest + ## env: + ## - name: DOCKER_HOST + ## value: tcp://localhost:2376 + ## - name: DOCKER_TLS_VERIFY + ## value: "1" + ## - name: DOCKER_CERT_PATH + ## value: /certs/client + ## volumeMounts: + ## - name: workingDirectoryInternal + ## mountPath: /actions-runner/_work + ## - name: dinDInternal + ## mountPath: /certs/client + ## readOnly: true + ## - name: dind + ## image: docker:dind + ## securityContext: + ## privileged: true + ## volumeMounts: + ## - mountPath: /certs/client + ## name: dinDInternal + ## - mountPath: /actions-runner/_work + ## name: workingDirectoryInternal + ## - mountPath: /actions-runner/externals + ## name: externalsInternal + ## volumes: + ## - name: dinDInternal + ## emptyDir: {} + 
## - name: workingDirectoryInternal + ## emptyDir: {} + ## - name: externalsInternal + ## emptyDir: {} + ###################################################################################################### + ## with containerMode.type=kubernetes, we will populate the template.spec with following pod spec + ## template: + ## spec: + ## containers: + ## - name: runner + ## image: ghcr.io/actions/actions-runner:latest + ## env: + ## - name: ACTIONS_RUNNER_CONTAINER_HOOKS + ## value: /actions-runner/k8s/index.js + ## - name: ACTIONS_RUNNER_POD_NAME + ## valueFrom: + ## fieldRef: + ## fieldPath: metadata.name + ## - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER + ## value: "true" + ## volumeMounts: + ## - name: work + ## mountPath: /actions-runner/_work + ## volumes: + ## - name: work + ## ephemeral: + ## volumeClaimTemplate: + ## spec: + ## accessModes: [ "ReadWriteOnce" ] + ## storageClassName: "local-path" + ## resources: + ## requests: + ## storage: 1Gi + + ## the following is required when containerMode.type=kubernetes + kubernetesModeWorkVolumeClaim: + accessModes: ["ReadWriteOnce"] + # For testing, use https://github.com/rancher/local-path-provisioner to provide dynamic provision volume + # TODO: remove before release + storageClassName: "dynamic-blob-storage" + resources: + requests: + storage: 1Gi \ No newline at end of file From 464e13980c3efa5dc43c6097ee8f894821e35ee7 Mon Sep 17 00:00:00 2001 From: Hyeonmin Park Date: Wed, 18 Jan 2023 07:38:05 +0900 Subject: [PATCH 024/561] Fix typo in release note for ARC 0.27.0 (#2158) --- docs/releasenotes/0.27.md | 2 +- docs/releasenotes/app-version-mapping.md | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/releasenotes/0.27.md b/docs/releasenotes/0.27.md index ded0970bb2..f823d5aa05 100644 --- a/docs/releasenotes/0.27.md +++ b/docs/releasenotes/0.27.md @@ -40,7 +40,7 @@ spec: pullRequest: {} ``` -You need to update the spec to look like the below, along with enabling the `Workflow Job` 
events(and disabling unneeded `Push`, `Check Run`, and `Pull Request` evenst) on your webhook setting page on GitHub. +You need to update the spec to look like the below, along with enabling the `Workflow Job` events(and disabling unneeded `Push`, `Check Run`, and `Pull Request` events) on your webhook setting page on GitHub. ```yaml kind: HorizontalRunnerAutoscaler diff --git a/docs/releasenotes/app-version-mapping.md b/docs/releasenotes/app-version-mapping.md index bd32a7c467..695162d7f3 100644 --- a/docs/releasenotes/app-version-mapping.md +++ b/docs/releasenotes/app-version-mapping.md @@ -4,7 +4,8 @@ The following table summarizes the version mapping between controller and chart |Controller (App) Version|Chart Version| |---|---| -|0.26.0|0.21.0| +|0.27.0|0.22.0| +|0.26.0|0.21.1/0.21.0| |0.25.2|0.20.2| |0.25.1|0.20.1| |0.25.0|0.20.0| From 766d96d229a112a49560eb1063e24e00e164666a Mon Sep 17 00:00:00 2001 From: James Bradshaw <40304131+james-bradshaw-coding@users.noreply.github.com> Date: Tue, 17 Jan 2023 15:38:42 -0700 Subject: [PATCH 025/561] Fix minor typos in 0.27.md (#2171) --- docs/releasenotes/0.27.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/releasenotes/0.27.md b/docs/releasenotes/0.27.md index f823d5aa05..1d03c66758 100644 --- a/docs/releasenotes/0.27.md +++ b/docs/releasenotes/0.27.md @@ -8,13 +8,13 @@ This log documents breaking changes and major enhancements ## Upgrading -In case you're using our Helm chart to deploy ARC, use the chart 0.21.0 or greater. Don't miss upgrading CRDs as usual! Helm doesn't upgrade CRDs. +In case you're using our Helm chart to deploy ARC, use the chart 0.22.0 or greater ([current](https://github.com/actions/actions-runner-controller/blob/master/charts/actions-runner-controller/Chart.yaml#L18)). Don't miss upgrading CRDs as usual! Helm doesn't upgrade CRDs. ## BREAKING CHANGE : `workflow_job` became ARC's only supported webhook event as the scale trigger. 
-In this release, we've removed support for legacy `check_run`, `push`, and `pull_request` webhook events, in favor of `workflow_job` that has been released a year ago. Since then, it served all the use-cases formely and partially supported by the legacy events, and we should be ready to fully migrate to `workflow_job`. +In this release, we've removed support for legacy `check_run`, `push`, and `pull_request` webhook events, in favor of `workflow_job` that has been released a year ago. Since then, it served all the use-cases formerly and partially supported by the legacy events, and we should be ready to fully migrate to `workflow_job`. -Anyone who's still using legacy webook events should see `HorizontalRunnerAutoscaler` specs that look similar to the following examples: +Anyone who's still using legacy webhook events should see `HorizontalRunnerAutoscaler` specs that look similar to the following examples: ```yaml kind: HorizontalRunnerAutoscaler @@ -54,21 +54,21 @@ Relevant PR(s): #2001 ## Fix : Runner pods should work more reliably with cluster-autoscaler -We've fixed many edge-cases in the runner pod termination process which seem to have resulted in various issues, like pods stuck in Terminating, workflow jobs being stuck for 10 minutes or so when an external controller like cluster-autoscaler tried to terminate the runner pod that is still running a workflow job, a workflow job fails due to a job container step being unable access the docker daemon, and so on. +We've fixed many edge-cases in the runner pod termination process which seem to have resulted in various issues, like pods stuck in Terminating, workflow jobs being stuck for 10 minutes or so when an external controller like cluster-autoscaler tried to terminate the runner pod that is still running a workflow job, a workflow job fails due to a job container step being unable to access the docker daemon, and so on. 
-Do note that you need to set appropariate `RUNNER_GRACEFUL_STOP_TIMEOUT` for both the `docker` sidecar container and the `runner` container specs to let it wait for long and sufficient time for your use-case. +Do note that you need to set appropriate `RUNNER_GRACEFUL_STOP_TIMEOUT` for both the `docker` sidecar container and the `runner` container specs to let it wait for long and sufficient time for your use-case. `RUNNER_GRACEFUL_STOP_TIMEOUT` is basically the longest time the runner stop process to wait until the runner agent to gracefully stop. It's set to `RUNNER_GRACEFUL_STOP_TIMEOUT=15` by default, which might be too short for any use-cases. -For example, in case you're using AWS Spot Instances to power nodes for runner pods, it gives you 2 minutes at the longest. You'd want to set the graceful stop timeout slightly shorter than the 2 minutes, like `110` or `100` seconds depending how much cpu, memory and storage your runner pod is provided. +For example, in case you're using AWS Spot Instances to power nodes for runner pods, it gives you 2 minutes at the longest. You'd want to set the graceful stop timeout slightly shorter than the 2 minutes, like `110` or `100` seconds depending on how much cpu, memory and storage your runner pod is provided. With rich cpu/memory/storage/network resources, the runner agent could stop gracefully well within 10 seconds, making `110` the right setting. With fewer resources, the runner agent could take more than 10 seconds to stop gracefully. If you think it would take 20 seconds for your environment, `100` would be the right setting. `RUNNER_GRACEFUL_STOP_TIMEOUT` is designed to be used to let the runner stop process as long as possible to avoid cancelling the workflow job in the middle of processing, yet avoiding the workflow job to stuck for 10 minutes due to the node disappear before the runner agent cancelling the job. 
-Under the hood, `RUNNER_GRACEFUL_STOP_TIMEOUT` works by instructing [runner's signal handler](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/runner/graceful-stop.sh#L7) to delay forwarding `SIGTERM` sent by Kubernetes on pod terminatino down to the runner agent. The runner agent is supposed to cancel the workflow job only on `SIGTERM` so making this delay longer allows you to delay cancelling the workfow job, which results in a more graceful period to stop the runner. Practically, the runner pod stops gracefully only when the workflow job running within the runner pod has completed before the runner graceful stop timeout elapses. The timeout can't be forever in practice, although it might theoretically possible depending on your cluster environment. AWS Spot Instances, again for example, gives you 2 minutes to gracefully stop the whole node, and therefore `RUNNER_GRACEFUL_STOP_TIMEOUT` can't be longer than that. +Under the hood, `RUNNER_GRACEFUL_STOP_TIMEOUT` works by instructing [runner's signal handler](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/runner/graceful-stop.sh#L7) to delay forwarding `SIGTERM` sent by Kubernetes on pod termination down to the runner agent. The runner agent is supposed to cancel the workflow job only on `SIGTERM` so making this delay longer allows you to delay cancelling the workflow job, which results in a more graceful period to stop the runner. Practically, the runner pod stops gracefully only when the workflow job running within the runner pod has completed before the runner graceful stop timeout elapses. The timeout can't be forever in practice, although it might theoretically be possible depending on your cluster environment. AWS Spot Instances, again for example, gives you 2 minutes to gracefully stop the whole node, and therefore `RUNNER_GRACEFUL_STOP_TIMEOUT` can't be longer than that. 
If you have success stories with the new `RUNNER_GRACEFUL_STOP_TIMEOUT`, please don't hesitate to create a `Show and Tell` discussion in our GitHub Discussions to share what configuration worked on which environment, including the name of your cloud provider, the name of managed Kubernetes service, the graceful stop timeout for nodes(defined and provided by the provider or the service) and the runner pods (`RUNNER_GRACEFUL_STOP_TIMEOUT`). @@ -76,11 +76,11 @@ Relevant PR(s): #1759, #1851, #1855 ## ENHANCEMENT : More reliable and customizable "wait-for-docker" feature -You can now add a `WAIT_FOR_DOCKER_SECONDS` envvar to the `runner` container of the runner pod spec to customize how long you want the runner startup script to wait until the docker daemon gets up and running. Previously this has been hard-coded to 120 seconds and it wasn't sufficient in some environments. +You can now add a `WAIT_FOR_DOCKER_SECONDS` envvar to the `runner` container of the runner pod spec to customize how long you want the runner startup script to wait until the docker daemon gets up and running. Previously this has been hard-coded to 120 seconds which wasn't sufficient in some environments. Along with the enhancement, we also fixed a bug in the runner startup script that it didn't exit immediately on the docker startup timeout. The bug resulted in that you see a job container step failing due to missing docker socket. Ideally it should have kept auto-restarting the whole runner pod until you get a fully working runner pod with the working runner agent plus the docker daemon (that started within the timeout), and therefore you should have never seen the job step failing due to docker issue. -We fixed it so it should work as intended now. +We fixed it so that it should work as intended now. 
Relvant PR(s): #1999 From 9d1ea79f9e45b27a4f1239402d8f48913b3e2299 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 17 Jan 2023 17:38:59 -0500 Subject: [PATCH 026/561] Include extra user-agent for runners created by actions-runner-controller. (#2177) --- cmd/githubrunnerscalesetlistener/main.go | 3 ++- controllers/actions.github.com/constants.go | 3 ++- .../actions.github.com/resourcebuilder.go | 23 ++++++++++++------- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/cmd/githubrunnerscalesetlistener/main.go b/cmd/githubrunnerscalesetlistener/main.go index 583ae2682d..2c9d71b273 100644 --- a/cmd/githubrunnerscalesetlistener/main.go +++ b/cmd/githubrunnerscalesetlistener/main.go @@ -23,6 +23,7 @@ import ( "os/signal" "syscall" + "github.com/actions/actions-runner-controller/build" "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/logging" "github.com/go-logr/logr" @@ -83,7 +84,7 @@ func run(rc RunnerScaleSetListenerConfig, logger logr.Logger) error { } } - actionsServiceClient, err := actions.NewClient(ctx, rc.ConfigureUrl, creds, "actions-runner-controller", logger) + actionsServiceClient, err := actions.NewClient(ctx, rc.ConfigureUrl, creds, fmt.Sprintf("actions-runner-controller/%s", build.Version), logger) if err != nil { return fmt.Errorf("failed to create an Actions Service client: %w", err) } diff --git a/controllers/actions.github.com/constants.go b/controllers/actions.github.com/constants.go index 0ff80d53d0..70f39628d7 100644 --- a/controllers/actions.github.com/constants.go +++ b/controllers/actions.github.com/constants.go @@ -6,5 +6,6 @@ const ( ) const ( - EnvVarRunnerJITConfig = "ACTIONS_RUNNER_INPUT_JITCONFIG" + EnvVarRunnerJITConfig = "ACTIONS_RUNNER_INPUT_JITCONFIG" + EnvVarRunnerExtraUserAgent = "GITHUB_ACTIONS_RUNNER_EXTRA_USER_AGENT" ) diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index 
45df0b4cda..f6aa1a4719 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -7,6 +7,7 @@ import ( "strconv" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/build" "github.com/actions/actions-runner-controller/hash" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -357,17 +358,23 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a for _, c := range runner.Spec.PodTemplateSpec.Spec.Containers { if c.Name == EphemeralRunnerContainerName { - c.Env = append(c.Env, corev1.EnvVar{ - Name: EnvVarRunnerJITConfig, - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: secret.Name, + c.Env = append( + c.Env, + corev1.EnvVar{ + Name: EnvVarRunnerJITConfig, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Key: jitTokenKey, }, - Key: jitTokenKey, }, }, - }) + corev1.EnvVar{ + Name: EnvVarRunnerExtraUserAgent, + Value: fmt.Sprintf("actions-runner-controller/%s", build.Version), + }) } newPod.Spec.Containers = append(newPod.Spec.Containers, c) From 0e05ce0198b6567d4a6c44b826147bdb2a39778d Mon Sep 17 00:00:00 2001 From: xi2817-aajgaonkar <88836921+xi2817-aajgaonkar@users.noreply.github.com> Date: Wed, 18 Jan 2023 04:42:13 +0530 Subject: [PATCH 027/561] Update quickstart.md (#2164) --- docs/quickstart.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/quickstart.md b/docs/quickstart.md index 013cfc5f2c..b7b98d160a 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -62,7 +62,7 @@ helm repo add actions-runner-controller https://actions-runner-controller.github helm upgrade --install --namespace actions-runner-system --create-namespace\ --set=authSecret.create=true\ 
--set=authSecret.github_token="REPLACE_YOUR_TOKEN_HERE"\ - --wait actions-runner-controller actions/actions-runner-controller + --wait actions-runner-controller actions-runner-controller/actions-runner-controller ``` *note:- Replace REPLACE_YOUR_TOKEN_HERE with your PAT that was generated previously. From d1037fb0bfb53cb17524f5bc9126226880775a71 Mon Sep 17 00:00:00 2001 From: Hyeonmin Park Date: Wed, 18 Jan 2023 08:12:24 +0900 Subject: [PATCH 028/561] Fix logFormat comment for each module in Helm chart (#2166) --- charts/actions-runner-controller/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/actions-runner-controller/values.yaml b/charts/actions-runner-controller/values.yaml index fb9a3378e1..4d724051ba 100644 --- a/charts/actions-runner-controller/values.yaml +++ b/charts/actions-runner-controller/values.yaml @@ -195,7 +195,7 @@ githubWebhookServer: enabled: false replicaCount: 1 useRunnerGroupsVisibility: false - ## specify log format for github webhook controller. Valid options are "text" and "json" + ## specify log format for github webhook server. Valid options are "text" and "json" logFormat: text secret: enabled: false @@ -298,7 +298,7 @@ actionsMetricsServer: # See the thread below for more context. # https://github.com/actions/actions-runner-controller/pull/1814#discussion_r974758924 replicaCount: 1 - ## specify log format for github webhook controller. Valid options are "text" and "json" + ## specify log format for actions metrics server. 
Valid options are "text" and "json" logFormat: text secret: enabled: false From 9f38c474af342d56d48f30ce29e57d650ed53eed Mon Sep 17 00:00:00 2001 From: Ritesh Khadgaray <7668569+ritzk@users.noreply.github.com> Date: Tue, 17 Jan 2023 18:13:08 -0500 Subject: [PATCH 029/561] Update installing-arc.md (#2162) --- docs/installing-arc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installing-arc.md b/docs/installing-arc.md index 48ba95eeda..f57981aa99 100644 --- a/docs/installing-arc.md +++ b/docs/installing-arc.md @@ -22,5 +22,5 @@ Configure your values.yaml, see the chart's [README](../charts/actions-runner-co ```shell helm repo add actions-runner-controller https://actions-runner-controller.github.io/actions-runner-controller helm upgrade --install --namespace actions-runner-system --create-namespace \ - --wait actions-runner-controller actions/actions-runner-controller + --wait actions-runner-controller actions-runner-controller/actions-runner-controller ``` From b8d4d6c8f3372f79b5c40aa527310cef9871f06b Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 17 Jan 2023 18:26:30 -0500 Subject: [PATCH 030/561] Update publish-arc2 workflow to use right path. 
(#2173) --- .github/workflows/publish-arc2.yaml | 79 ++++++++++++++++++++++++----- 1 file changed, 65 insertions(+), 14 deletions(-) diff --git a/.github/workflows/publish-arc2.yaml b/.github/workflows/publish-arc2.yaml index 8d26a84a72..8b2307f725 100644 --- a/.github/workflows/publish-arc2.yaml +++ b/.github/workflows/publish-arc2.yaml @@ -18,8 +18,13 @@ on: required: true type: boolean default: false - publish_helm: - description: 'Publish new helm chart' + publish_actions_runner_controller_2_chart: + description: 'Publish new helm chart for actions-runner-controller-2' + required: true + type: boolean + default: false + publish_auto_scaling_runner_set_chart: + description: 'Publish new helm chart for auto-scaling-runner-set' required: true type: boolean default: false @@ -94,10 +99,10 @@ jobs: echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY - publish-helm-chart: - if: ${{ inputs.publish_helm == true }} + publish-helm-chart-arc-2: + if: ${{ inputs.publish_actions_runner_controller_2_chart == true }} needs: build-push-image - name: Publish Helm chart + name: Publish Helm chart for actions-runner-controller-2 runs-on: ubuntu-latest steps: - name: Checkout @@ -124,21 +129,67 @@ jobs: with: version: ${{ env.HELM_VERSION }} - - name: Publish new helm chart + - name: Publish new helm chart for actions-runner-controller-2 run: | echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin - CHART_VERSION='$(cat charts_preview/actions-runner-controller-2/Chart.yaml | grep version: | cut -d " " -f 2)' - echo "CHART_VERSION_TAG=${CHART_VERSION}-${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_ENV - helm package charts_preview/actions-runner-controller-2/ --version="${CHART_VERSION}-${{ steps.resolve_parameters.outputs.short_sha }}" - # Tag is inferred from SemVer of Chart and cannot be set manually. 
- # See https://helm.sh/docs/topics/registries/#the-push-subcommand - helm push actions-runner-controller-"${CHART_VERSION}-${{ steps.resolve_parameters.outputs.short_sha }}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-helm-chart-2 + ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG=$(cat charts/actions-runner-controller-2/Chart.yaml | grep version: | cut -d " " -f 2) + echo "ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG=${ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG}" >> $GITHUB_ENV + helm package charts/actions-runner-controller-2/ --version="${ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG}" + helm push actions-runner-controller-2-"${ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts + + - name: Job summary + run: | + echo "New helm chart for actions-runner-controller-2 published successfully!" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY + echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY + echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY + echo "- Actions-Runner-Controller-2 Chart version: ${{ env.ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY + + publish-helm-chart-auto-scaling-runner-set: + if: ${{ inputs.publish_auto_scaling_runner_set_chart == true }} + needs: build-push-image + name: Publish Helm chart for auto-scaling-runner-set + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + # If inputs.ref is empty, it'll resolve to the default branch + ref: ${{ inputs.ref }} + + - name: Resolve parameters + id: resolve_parameters + run: | + resolvedRef="${{ inputs.ref }}" + if [ -z "$resolvedRef" ] + then + resolvedRef="${{ github.ref }}" + fi + echo "INFO: Resolving short SHA for $resolvedRef" + 
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT + echo "INFO: Normalizing repository name (lowercase)" + echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT + + - name: Set up Helm + uses: azure/setup-helm@v3.3 + with: + version: ${{ env.HELM_VERSION }} + + - name: Publish new helm chart for auto-scaling-runner-set + run: | + echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin + + AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG=$(cat charts/auto-scaling-runner-set/Chart.yaml | grep version: | cut -d " " -f 2) + echo "AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG=${AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG}" >> $GITHUB_ENV + helm package charts/auto-scaling-runner-set/ --version="${AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG}" + helm push auto-scaling-runner-set-"${AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts - name: Job summary run: | - echo "New helm chart published successfully!" >> $GITHUB_STEP_SUMMARY + echo "New helm chart for auto-scaling-runner-set published successfully!" 
>> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY - echo "- Chart version: ${{ env.CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY + echo "- Auto-Scaling-Runner-Set Chart version: ${{ env.AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY From 35c6f4c5abe9d804c163664467eb4ce6d4784136 Mon Sep 17 00:00:00 2001 From: Stephane Moser Date: Wed, 18 Jan 2023 00:09:45 +0000 Subject: [PATCH 031/561] Add Repository information to Runner Status (#2093) Co-authored-by: Yusuke Kuoka --- .../v1alpha1/runner_types.go | 52 +++++++++++++++++++ .../v1alpha1/zz_generated.deepcopy.go | 20 +++++++ .../crds/actions.summerwind.dev_runners.yaml | 31 +++++++++++ .../bases/actions.summerwind.dev_runners.yaml | 31 +++++++++++ runner/update-status | 25 ++++++++- 5 files changed, 157 insertions(+), 2 deletions(-) diff --git a/apis/actions.summerwind.net/v1alpha1/runner_types.go b/apis/actions.summerwind.net/v1alpha1/runner_types.go index 9dda5ad9f0..7986efad90 100644 --- a/apis/actions.summerwind.net/v1alpha1/runner_types.go +++ b/apis/actions.summerwind.net/v1alpha1/runner_types.go @@ -248,10 +248,60 @@ type RunnerStatus struct { // +optional Message string `json:"message,omitempty"` // +optional + WorkflowStatus *WorkflowStatus `json:"workflow"` + // +optional // +nullable LastRegistrationCheckTime *metav1.Time `json:"lastRegistrationCheckTime,omitempty"` } +// WorkflowStatus contains various information that is propagated +// from GitHub Actions workflow run environment variables to +// ease monitoring workflow run/job/steps that are triggerred on the runner. +type WorkflowStatus struct { + // +optional + // Name is the name of the workflow + // that is triggerred within the runner. 
+ // It corresponds to GITHUB_WORKFLOW defined in + // https://docs.github.com/en/actions/learn-github-actions/environment-variables + Name string `json:"name,omitempty"` + // +optional + // Repository is the owner and repository name of the workflow + // that is triggerred within the runner. + // It corresponds to GITHUB_REPOSITORY defined in + // https://docs.github.com/en/actions/learn-github-actions/environment-variables + Repository string `json:"repository,omitempty"` + // +optional + // ReositoryOwner is the repository owner's name for the workflow + // that is triggerred within the runner. + // It corresponds to GITHUB_REPOSITORY_OWNER defined in + // https://docs.github.com/en/actions/learn-github-actions/environment-variables + RepositoryOwner string `json:"repositoryOwner,omitempty"` + // +optional + // GITHUB_RUN_NUMBER is the unique number for the current workflow run + // that is triggerred within the runner. + // It corresponds to GITHUB_RUN_ID defined in + // https://docs.github.com/en/actions/learn-github-actions/environment-variables + RunNumber string `json:"runNumber,omitempty"` + // +optional + // RunID is the unique number for the current workflow run + // that is triggerred within the runner. + // It corresponds to GITHUB_RUN_ID defined in + // https://docs.github.com/en/actions/learn-github-actions/environment-variables + RunID string `json:"runID,omitempty"` + // +optional + // Job is the name of the current job + // that is triggerred within the runner. + // It corresponds to GITHUB_JOB defined in + // https://docs.github.com/en/actions/learn-github-actions/environment-variables + Job string `json:"job,omitempty"` + // +optional + // Action is the name of the current action or the step ID of the current step + // that is triggerred within the runner. 
+ // It corresponds to GITHUB_ACTION defined in + // https://docs.github.com/en/actions/learn-github-actions/environment-variables + Action string `json:"action,omitempty"` +} + // RunnerStatusRegistration contains runner registration status type RunnerStatusRegistration struct { Enterprise string `json:"enterprise,omitempty"` @@ -316,6 +366,8 @@ func (w *WorkVolumeClaimTemplate) V1VolumeMount(mountPath string) corev1.VolumeM // +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string // +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string // +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string +// +kubebuilder:printcolumn:JSONPath=".status.workflow.repository",name=WF Repo,type=string +// +kubebuilder:printcolumn:JSONPath=".status.workflow.runID",name=WF Run,type=string // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // Runner is the Schema for the runners API diff --git a/apis/actions.summerwind.net/v1alpha1/zz_generated.deepcopy.go b/apis/actions.summerwind.net/v1alpha1/zz_generated.deepcopy.go index 6021b7a55c..bc450b7c11 100644 --- a/apis/actions.summerwind.net/v1alpha1/zz_generated.deepcopy.go +++ b/apis/actions.summerwind.net/v1alpha1/zz_generated.deepcopy.go @@ -1049,6 +1049,11 @@ func (in *RunnerSpec) DeepCopy() *RunnerSpec { func (in *RunnerStatus) DeepCopyInto(out *RunnerStatus) { *out = *in in.Registration.DeepCopyInto(&out.Registration) + if in.WorkflowStatus != nil { + in, out := &in.WorkflowStatus, &out.WorkflowStatus + *out = new(WorkflowStatus) + **out = **in + } if in.LastRegistrationCheckTime != nil { in, out := &in.LastRegistrationCheckTime, &out.LastRegistrationCheckTime *out = (*in).DeepCopy() @@ -1212,3 +1217,18 @@ func (in *WorkflowJobSpec) DeepCopy() *WorkflowJobSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowStatus. +func (in *WorkflowStatus) DeepCopy() *WorkflowStatus { + if in == nil { + return nil + } + out := new(WorkflowStatus) + in.DeepCopyInto(out) + return out +} diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml index fdbb0059a5..4129a909c4 100644 --- a/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml @@ -36,6 +36,12 @@ spec: - jsonPath: .status.message name: Message type: string + - jsonPath: .status.workflow.repository + name: WF Repo + type: string + - jsonPath: .status.workflow.runID + name: WF Run + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -5225,6 +5231,31 @@ spec: - expiresAt - token type: object + workflow: + description: WorkflowStatus contains various information that is propagated from GitHub Actions workflow run environment variables to ease monitoring workflow run/job/steps that are triggerred on the runner. + properties: + action: + description: Action is the name of the current action or the step ID of the current step that is triggerred within the runner. It corresponds to GITHUB_ACTION defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + job: + description: Job is the name of the current job that is triggerred within the runner. It corresponds to GITHUB_JOB defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + name: + description: Name is the name of the workflow that is triggerred within the runner. 
It corresponds to GITHUB_WORKFLOW defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + repository: + description: Repository is the owner and repository name of the workflow that is triggerred within the runner. It corresponds to GITHUB_REPOSITORY defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + repositoryOwner: + description: ReositoryOwner is the repository owner's name for the workflow that is triggerred within the runner. It corresponds to GITHUB_REPOSITORY_OWNER defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + runID: + description: RunID is the unique number for the current workflow run that is triggerred within the runner. It corresponds to GITHUB_RUN_ID defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + runNumber: + description: GITHUB_RUN_NUMBER is the unique number for the current workflow run that is triggerred within the runner. 
It corresponds to GITHUB_RUN_ID defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + type: object type: object type: object served: true diff --git a/config/crd/bases/actions.summerwind.dev_runners.yaml b/config/crd/bases/actions.summerwind.dev_runners.yaml index fdbb0059a5..4129a909c4 100644 --- a/config/crd/bases/actions.summerwind.dev_runners.yaml +++ b/config/crd/bases/actions.summerwind.dev_runners.yaml @@ -36,6 +36,12 @@ spec: - jsonPath: .status.message name: Message type: string + - jsonPath: .status.workflow.repository + name: WF Repo + type: string + - jsonPath: .status.workflow.runID + name: WF Run + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -5225,6 +5231,31 @@ spec: - expiresAt - token type: object + workflow: + description: WorkflowStatus contains various information that is propagated from GitHub Actions workflow run environment variables to ease monitoring workflow run/job/steps that are triggerred on the runner. + properties: + action: + description: Action is the name of the current action or the step ID of the current step that is triggerred within the runner. It corresponds to GITHUB_ACTION defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + job: + description: Job is the name of the current job that is triggerred within the runner. It corresponds to GITHUB_JOB defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + name: + description: Name is the name of the workflow that is triggerred within the runner. It corresponds to GITHUB_WORKFLOW defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + repository: + description: Repository is the owner and repository name of the workflow that is triggerred within the runner. 
It corresponds to GITHUB_REPOSITORY defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + repositoryOwner: + description: ReositoryOwner is the repository owner's name for the workflow that is triggerred within the runner. It corresponds to GITHUB_REPOSITORY_OWNER defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + runID: + description: RunID is the unique number for the current workflow run that is triggerred within the runner. It corresponds to GITHUB_RUN_ID defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + runNumber: + description: GITHUB_RUN_NUMBER is the unique number for the current workflow run that is triggerred within the runner. It corresponds to GITHUB_RUN_ID defined in https://docs.github.com/en/actions/learn-github-actions/environment-variables + type: string + type: object type: object type: object served: true diff --git a/runner/update-status b/runner/update-status index d12e92a17d..81b1932a64 100755 --- a/runner/update-status +++ b/runner/update-status @@ -15,9 +15,30 @@ if [[ ${RUNNER_STATUS_UPDATE_HOOK:-false} == true ]]; then namespace=$(cat ${serviceaccount}/namespace) token=$(cat ${serviceaccount}/token) phase=$1 - shift + message=${2:-} - jq -n --arg phase "$phase" --arg message "${*:-}" '.status.phase = $phase | .status.message = $message' | curl \ + data=$(jq -n --arg phase "$phase" \ + --arg message "$message" \ + --arg workflow_repository "${GITHUB_REPOSITORY:-}" \ + --arg workflow_repository_owner "${GITHUB_REPOSITORY_OWNER:-}" \ + --arg workflow_name "${GITHUB_WORKFLOW:-}" \ + --arg workflow_run_id "${GITHUB_RUN_ID:-}" \ + --arg workflow_run_number "${GITHUB_RUN_NUMBER:-}" \ + --arg workflow_job "${GITHUB_JOB:-}" \ + --arg workflow_action "${GITHUB_ACTION:-}" \ + ' + .status.phase = $phase + | .status.message = $message + | .status.workflow.name = $workflow_name + | 
.status.workflow.runID = $workflow_run_id + | .status.workflow.runNumber = $workflow_run_number + | .status.workflow.repository = $workflow_repository + | .status.workflow.repositoryOwner = $workflow_repository_owner + | .status.workflow.job = $workflow_job + | .status.workflow.action = $workflow_action + ') + + echo "$data" | curl \ --cacert ${serviceaccount}/ca.crt \ --data @- \ --noproxy '*' \ From 50a04d00715f780f36f1a842fd08a80cc1bf9b91 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Wed, 18 Jan 2023 14:17:25 +0100 Subject: [PATCH 032/561] Add arc-2 quickstart guide (#2180) --- .../actions-runner-controller-2/README.md | 142 ++++++++++++++++++ docs/quickstart.md | 2 +- 2 files changed, 143 insertions(+), 1 deletion(-) create mode 100644 docs/preview/actions-runner-controller-2/README.md diff --git a/docs/preview/actions-runner-controller-2/README.md b/docs/preview/actions-runner-controller-2/README.md new file mode 100644 index 0000000000..32d1099122 --- /dev/null +++ b/docs/preview/actions-runner-controller-2/README.md @@ -0,0 +1,142 @@ +# Autoscaling Runner Scale Sets mode + +**⚠️ This mode is currently only available for a limited number of organizations.** + +This new autoscaling mode brings numerous enhancements (described in the following sections) that will make your experience more reliable and secure. 
+ +## How it works + +![arc_hld_v1 drawio (1)](https://user-images.githubusercontent.com/568794/212665433-2d1f3d6e-0ba8-4f02-9d1b-27d00c49abd1.png) + +In addition to the increased reliability of the automatic scaling, we have worked on these improvements: + +- No longer require cert-manager as a prerequisite for installing actions-runner-controller +- Reliable scale-up based on job demands and scale-down to zero runner pods +- Reduce API requests to `api.github.com`, no more API rate-limiting problems +- The GitHub Personal Access Token (PAT) or the GitHub App installation token is no longer passed to the runner pod for runner registration +- Maximum flexibility for customizing your runner pod template + +### Demo + +https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a73e-27f5e8c75720.mp4 + +## Setup + +### Prerequisites + +1. Create a K8s cluster, if not available. + - If you don't have a K8s cluster, you can install a local environment using minikube. See [installing minikube](https://minikube.sigs.k8s.io/docs/start/). +1. Install helm 3, if not available. See [installing Helm](https://helm.sh/docs/intro/install/). + +### Install actions-runner-controller + +1. Install actions-runner-controller using helm 3. For additional configuration options, see [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/actions-runner-controller-2/values.yaml) + + ```bash + NAMESPACE="arc-systems" + helm install arc \ + --namespace "${NAMESPACE}" \ + --create-namespace \ + oci://ghcr.io/actions/actions-runner-controller-charts/actions-runner-controller-2 \ + --version 0.1.0 + ``` + +1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app). 
+ - For the list of required permissions, see [Authenticating to the GitHub API](https://github.com/actions/actions-runner-controller/blob/master/docs/authenticating-to-the-github-api.md#authenticating-to-the-github-api). + +1. You're ready to install the autoscaling runner set. For additional configuration options, see [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/auto-scaling-runner-set/values.yaml) + - **Choose your installation name carefully**, you will use it as the value of `runs-on` in your workflow. + + ```bash + # Using a Personal Access Token (PAT) + INSTALLATION_NAME="arc-runner-set" + NAMESPACE="arc-systems" + GITHUB_CONFIG_URL="https://github.com/" + GITHUB_PAT="" + helm install "${INSTALLATION_NAME}" \ + --namespace "${NAMESPACE}" \ + --create-namespace \ + --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ + --set githubConfigSecret.github_token="${GITHUB_PAT}" \ + oci://ghcr.io/actions/actions-runner-controller-charts/auto-scaling-runner-set --version 0.1.0 + ``` + + ```bash + # Using a GitHub App + INSTALLATION_NAME="arc-runner-set" + NAMESPACE="arc-systems" + GITHUB_CONFIG_URL="https://github.com/" + GITHUB_APP_ID="" + GITHUB_APP_INSTALLATION_ID="" + GITHUB_APP_PRIVATE_KEY="" + helm install arc-runner-set \ + --namespace "${NAMESPACE}" \ + --create-namespace \ + --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ + --set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \ + --set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \ + --set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \ + oci://ghcr.io/actions/actions-runner-controller-charts/auto-scaling-runner-set --version 0.1.0 + ``` + +1. Check your installation. 
If everything went well, you should see the following: + + ```bash + $ helm list -n "${NAMESPACE}" + + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed actions-runner-controller-2-0.1.0 preview + arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed auto-scaling-runner-set-0.1.0 0.1.0 + ``` + + ```bash + $ kubectl get pods -n "${NAMESPACE}" + + NAME READY STATUS RESTARTS AGE + arc-actions-runner-controller-2-8c74b6f95-gr7zr 1/1 Running 0 20m + arc-runner-set-6cd58d58-listener 1/1 Running 0 21s + ``` + +1. In a repository, create a simple test workflow as follows. The `runs-on` value should match the helm installation name you used in the previous step. + + ```yaml + name: Test workflow + on: + workflow_dispatch: + + jobs: + test: + runs-on: arc-runner-set + steps: + - name: Hello world + run: echo "Hello world" + ``` + +1. Run the workflow. You should see the runner pod being created and the workflow being executed. + + ```bash + $ kubectl get pods -n "${NAMESPACE}" + + NAMESPACE NAME READY STATUS RESTARTS AGE + arc-systems arc-actions-runner-controller-2-8c74b6f95-gr7zr 1/1 Running 0 27m + arc-systems arc-runner-set-6cd58d58-listener 1/1 Running 0 7m52s + arc-systems arc-runner-set-rmrgw-runner-p9p5n 1/1 Running 0 21s + ``` + +## Troubleshooting + +### Check the logs + +You can check the logs of the controller pod using the following command: + +```bash +# Controller logs +$ kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=actions-runner-controller-2 + +# Runner set listener logs +kubectl logs -n "${NAMESPACE}" -l runner-scale-set-listener=arc-systems-arc-runner-set +``` + +### If you installed the autoscaling runner set, but the listener pod is not created + +Verify that the secret you provided is correct and that the `githubConfigUrl` you provided is accurate. 
diff --git a/docs/quickstart.md b/docs/quickstart.md index b7b98d160a..4061cf5026 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -46,7 +46,7 @@ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/ ### Deploy and Configure ARC -1️⃣ Deploy and configure ARC on your K8s cluster. You may use Helm or Kubectl. +1️⃣ Deploy and configure ARC on your K8s cluster. You may use Helm or Kubectl.
Helm deployment From 1fa3c392ad08ff041d1539375bfa4d5e5da56358 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Wed, 18 Jan 2023 15:59:31 +0100 Subject: [PATCH 033/561] Add distinct namespace best practice note (#2181) --- docs/preview/actions-runner-controller-2/README.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/docs/preview/actions-runner-controller-2/README.md b/docs/preview/actions-runner-controller-2/README.md index 32d1099122..792d4c0a5f 100644 --- a/docs/preview/actions-runner-controller-2/README.md +++ b/docs/preview/actions-runner-controller-2/README.md @@ -42,15 +42,16 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 ``` 1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app). - - For the list of required permissions, see [Authenticating to the GitHub API](https://github.com/actions/actions-runner-controller/blob/master/docs/authenticating-to-the-github-api.md#authenticating-to-the-github-api). + - ℹ For the list of required permissions, see [Authenticating to the GitHub API](https://github.com/actions/actions-runner-controller/blob/master/docs/authenticating-to-the-github-api.md#authenticating-to-the-github-api). 1. You're ready to install the autoscaling runner set. For additional configuration options, see [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/auto-scaling-runner-set/values.yaml) - - **Choose your installation name carefully**, you will use it as the value of `runs-on` in your workflow. + - ℹ **Choose your installation name carefully**, you will use it as the value of `runs-on` in your workflow. 
+ - ℹ **We recommend you choose a unique namespace in the following steps**. As a good security measure, it's best to have your runner pods created in a different namespace than the one containing the manager and listener pods. ```bash # Using a Personal Access Token (PAT) INSTALLATION_NAME="arc-runner-set" - NAMESPACE="arc-systems" + NAMESPACE="arc-runners" GITHUB_CONFIG_URL="https://github.com/" GITHUB_PAT="" helm install "${INSTALLATION_NAME}" \ @@ -64,7 +65,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 ```bash # Using a GitHub App INSTALLATION_NAME="arc-runner-set" - NAMESPACE="arc-systems" + NAMESPACE="arc-runners" GITHUB_CONFIG_URL="https://github.com/" GITHUB_APP_ID="" GITHUB_APP_INSTALLATION_ID="" @@ -115,12 +116,12 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 1. Run the workflow. You should see the runner pod being created and the workflow being executed. ```bash - $ kubectl get pods -n "${NAMESPACE}" + $ kubectl get pods -A NAMESPACE NAME READY STATUS RESTARTS AGE arc-systems arc-actions-runner-controller-2-8c74b6f95-gr7zr 1/1 Running 0 27m arc-systems arc-runner-set-6cd58d58-listener 1/1 Running 0 7m52s - arc-systems arc-runner-set-rmrgw-runner-p9p5n 1/1 Running 0 21s + arc-runners arc-runner-set-rmrgw-runner-p9p5n 1/1 Running 0 21s ``` ## Troubleshooting From 177a96bba520aa9e7b726a24740100605037b865 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Thu, 19 Jan 2023 11:36:05 +0100 Subject: [PATCH 034/561] Update runner version to 2.301.1 (#2182) Co-authored-by: TingluoHuang --- .github/workflows/release-runners.yaml | 2 +- Makefile | 2 +- runner/Makefile | 2 +- runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile | 2 +- runner/actions-runner-dind.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner-dind.ubuntu-22.04.dockerfile | 2 +- 
runner/actions-runner.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner.ubuntu-22.04.dockerfile | 2 +- test/e2e/e2e_test.go | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/release-runners.yaml b/.github/workflows/release-runners.yaml index 6d27b58ddd..726a8bf7a9 100644 --- a/.github/workflows/release-runners.yaml +++ b/.github/workflows/release-runners.yaml @@ -19,7 +19,7 @@ env: PUSH_TO_REGISTRIES: true TARGET_ORG: actions-runner-controller TARGET_WORKFLOW: release-runners.yaml - RUNNER_VERSION: 2.300.2 + RUNNER_VERSION: 2.301.1 DOCKER_VERSION: 20.10.21 RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0 diff --git a/Makefile b/Makefile index d997d3fa2d..171915bfef 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ else endif DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1) VERSION ?= dev -RUNNER_VERSION ?= 2.300.2 +RUNNER_VERSION ?= 2.301.1 TARGETPLATFORM ?= $(shell arch) RUNNER_NAME ?= ${DOCKER_USER}/actions-runner RUNNER_TAG ?= ${VERSION} diff --git a/runner/Makefile b/runner/Makefile index baafc49dbd..1b59f237e1 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) -RUNNER_VERSION ?= 2.300.2 +RUNNER_VERSION ?= 2.301.1 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0 DOCKER_VERSION ?= 20.10.21 diff --git a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile index faddc4edd7..eb36d8a37d 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_VERSION=2.301.1 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ENV CHANNEL=stable diff --git a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile 
b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile index 72ca96ba08..cc7a1c850c 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_VERSION=2.301.1 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ENV CHANNEL=stable diff --git a/runner/actions-runner-dind.ubuntu-20.04.dockerfile b/runner/actions-runner-dind.ubuntu-20.04.dockerfile index 6aab2d60c7..0e8e790acd 100644 --- a/runner/actions-runner-dind.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-20.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_VERSION=2.301.1 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable diff --git a/runner/actions-runner-dind.ubuntu-22.04.dockerfile b/runner/actions-runner-dind.ubuntu-22.04.dockerfile index 2170d10a7c..e549ca753d 100644 --- a/runner/actions-runner-dind.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-22.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_VERSION=2.301.1 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable diff --git a/runner/actions-runner.ubuntu-20.04.dockerfile b/runner/actions-runner.ubuntu-20.04.dockerfile index 94ceb531af..3c4ae5a156 100644 --- a/runner/actions-runner.ubuntu-20.04.dockerfile +++ b/runner/actions-runner.ubuntu-20.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_VERSION=2.301.1 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable diff --git a/runner/actions-runner.ubuntu-22.04.dockerfile b/runner/actions-runner.ubuntu-22.04.dockerfile index 3b998b76cf..e4c304f9d5 100644 --- 
a/runner/actions-runner.ubuntu-22.04.dockerfile +++ b/runner/actions-runner.ubuntu-22.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.300.2 +ARG RUNNER_VERSION=2.301.1 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 9d88d2d0dc..04e6810b28 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -41,7 +41,7 @@ var ( testResultCMNamePrefix = "test-result-" - RunnerVersion = "2.300.2" + RunnerVersion = "2.301.1" ) // If you're willing to run this test via VS Code "run test" or "debug test", From 6580d75aae55d783d45297e2e7a1a2a031e6bc07 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Thu, 19 Jan 2023 07:33:04 -0500 Subject: [PATCH 035/561] Fix L0 test to make it more reliable. (#2178) --- .../ephemeralrunner_controller.go | 2 + .../ephemeralrunner_controller_test.go | 82 ++++---- .../ephemeralrunnerset_controller_test.go | 180 ++++++++++++++++-- 3 files changed, 209 insertions(+), 55 deletions(-) diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go index 3ef0306cbd..b6945d2986 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller.go +++ b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -310,6 +310,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context, } if len(runnerLinkedPodList.Items) == 0 { + log.Info("Runner-linked pods are deleted") return true, nil } @@ -344,6 +345,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte } if len(runnerLinkedSecretList.Items) == 0 { + log.Info("Runner-linked secrets are deleted") return true, nil } diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go index 42f808094e..7f006454d7 100644 --- 
a/controllers/actions.github.com/ephemeralrunner_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go @@ -2,6 +2,7 @@ package actionsgithubcom import ( "context" + "fmt" "net/http" "time" @@ -22,7 +23,7 @@ import ( const ( gh_token = "gh_token" - timeout = time.Second * 30 + timeout = time.Second * 10 interval = time.Millisecond * 250 runnerImage = "ghcr.io/actions/actions-runner:latest" ) @@ -344,19 +345,6 @@ var _ = Describe("EphemeralRunner", func() { interval, ).Should(BeEquivalentTo(true)) - Eventually( - func() (bool, error) { - secret := new(corev1.Secret) - err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, secret) - if err == nil { - return false, nil - } - return kerrors.IsNotFound(err), nil - }, - timeout, - interval, - ).Should(BeEquivalentTo(true)) - Eventually( func() (bool, error) { updated := new(v1alpha1.EphemeralRunner) @@ -458,20 +446,43 @@ var _ = Describe("EphemeralRunner", func() { }) It("It should not re-create pod indefinitely", func() { + updated := new(v1alpha1.EphemeralRunner) pod := new(corev1.Pod) - failures := 0 - for i := 0; i < 6; i++ { - Eventually( - func() (bool, error) { - if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { - return false, err + Eventually( + func() (bool, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) + if err != nil { + return false, err + } + + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod) + if err != nil { + if kerrors.IsNotFound(err) && len(updated.Status.Failures) > 5 { + return true, nil } - return true, nil - }, - timeout, - interval, - ).Should(BeEquivalentTo(true)) + return false, err + } + + pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{ 
+ Name: EphemeralRunnerContainerName, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + }, + }, + }) + err = k8sClient.Status().Update(ctx, pod) + Expect(err).To(BeNil(), "Failed to update pod status") + return false, fmt.Errorf("pod haven't failed for 5 times.") + }, + timeout, + interval, + ).Should(BeEquivalentTo(true), "we should stop creating pod after 5 failures") + + // In case we still have pod created due to controller-runtime cache delay, mark the container as exited + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod) + if err == nil { pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{ Name: EphemeralRunnerContainerName, State: corev1.ContainerState{ @@ -482,19 +493,18 @@ var _ = Describe("EphemeralRunner", func() { }) err := k8sClient.Status().Update(ctx, pod) Expect(err).To(BeNil(), "Failed to update pod status") - - failures++ - - updated := new(v1alpha1.EphemeralRunner) - Eventually(func() (bool, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) - if err != nil { - return false, err - } - return len(updated.Status.Failures) == failures, nil - }, timeout, interval).Should(BeEquivalentTo(true)) } + // EphemeralRunner should failed with reason TooManyPodFailures + Eventually(func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) + if err != nil { + return "", err + } + return updated.Status.Reason, nil + }, timeout, interval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures") + + // EphemeralRunner should not have any pod Eventually(func() (bool, error) { err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod) if err == nil { diff --git 
a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index aa53504bbc..817e846f33 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -20,7 +20,7 @@ import ( ) const ( - ephemeralRunnerSetTestTimeout = time.Second * 5 + ephemeralRunnerSetTestTimeout = time.Second * 10 ephemeralRunnerSetTestInterval = time.Millisecond * 250 ephemeralRunnerSetTestGitHubToken = "gh_token" ) @@ -172,6 +172,26 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } + // Set status to simulate a configured EphemeralRunner + refetch := false + for i, runner := range runnerList.Items { + if runner.Status.RunnerId == 0 { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + updatedRunner.Status.RunnerId = i + 100 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + refetch = true + } + } + + if refetch { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + } + return len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, @@ -214,6 +234,26 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } + // Set status to simulate a configured EphemeralRunner + refetch := false + for i, runner := range runnerList.Items { + if runner.Status.RunnerId == 0 { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + updatedRunner.Status.RunnerId = i + 100 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + refetch = true + } + } + + if refetch { + err := k8sClient.List(ctx, runnerList, 
client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + } + return len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, @@ -278,20 +318,31 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } + // Set status to simulate a configured EphemeralRunner + refetch := false + for i, runner := range runnerList.Items { + if runner.Status.RunnerId == 0 { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + updatedRunner.Status.RunnerId = i + 100 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + refetch = true + } + } + + if refetch { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + } + return len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created") - // Set status to simulate a configured EphemeralRunner - for i, runner := range runnerList.Items { - updatedRunner := runner.DeepCopy() - updatedRunner.Status.Phase = corev1.PodRunning - updatedRunner.Status.RunnerId = i + 100 - err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - } - // Mark one of the EphemeralRunner as finished finishedRunner := runnerList.Items[4].DeepCopy() finishedRunner.Status.Phase = corev1.PodSucceeded @@ -327,20 +378,31 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } + // Set status to simulate a configured EphemeralRunner + refetch := false + for i, runner := range runnerList.Items { + if runner.Status.RunnerId == 0 { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + updatedRunner.Status.RunnerId = i + 100 + err = 
k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + refetch = true + } + } + + if refetch { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + } + return len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created") - // Set status to simulate a configured EphemeralRunner - for i, runner := range runnerList.Items { - updatedRunner := runner.DeepCopy() - updatedRunner.Status.Phase = corev1.PodRunning - updatedRunner.Status.RunnerId = i + 100 - err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - } - // Scale down the EphemeralRunnerSet updated = created.DeepCopy() updated.Spec.Replicas = 3 @@ -356,6 +418,26 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } + // Set status to simulate a configured EphemeralRunner + refetch := false + for i, runner := range runnerList.Items { + if runner.Status.RunnerId == 0 { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + updatedRunner.Status.RunnerId = i + 100 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + refetch = true + } + } + + if refetch { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + } + return len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, @@ -387,6 +469,26 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } + // Set status to simulate a configured EphemeralRunner + refetch := false + for i, runner := range runnerList.Items { + if runner.Status.RunnerId 
== 0 { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + updatedRunner.Status.RunnerId = i + 100 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + refetch = true + } + } + + if refetch { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + } + return len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, @@ -413,6 +515,26 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } + // Set status to simulate a configured EphemeralRunner + refetch := false + for i, runner := range runnerList.Items { + if runner.Status.RunnerId == 0 { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + updatedRunner.Status.RunnerId = i + 100 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + refetch = true + } + } + + if refetch { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + } + return len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, @@ -436,6 +558,26 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } + // Set status to simulate a configured EphemeralRunner + refetch := false + for i, runner := range runnerList.Items { + if runner.Status.RunnerId == 0 { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + updatedRunner.Status.RunnerId = i + 100 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + refetch = true + } + } + + if refetch { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != 
nil { + return -1, err + } + } + return len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, From 408f296837c60298c2d2acbad7547e6203098509 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Thu, 19 Jan 2023 14:21:08 +0100 Subject: [PATCH 036/561] Include nikola-jokic in CODEOWNERS file (#2184) --- CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 4b91db0383..e731691ab2 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,2 @@ # actions-runner-controller maintainers -* @mumoshu @toast-gear @actions/actions-runtime +* @mumoshu @toast-gear @actions/actions-runtime @nikola-jokic From 3403dc81efcf3d030728973f7811cd178555a3fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 21 Jan 2023 12:56:08 +0900 Subject: [PATCH 037/561] chore(deps): bump github.com/onsi/gomega from 1.20.2 to 1.25.0 (#2169) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 27 ++++------------------ go.sum | 73 +++++++++------------------------------------------------- 2 files changed, 16 insertions(+), 84 deletions(-) diff --git a/go.mod b/go.mod index 940e660bf9..e49696f690 100644 --- a/go.mod +++ b/go.mod @@ -13,11 +13,10 @@ require ( github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 - github.com/gruntwork-io/terratest v0.40.24 github.com/hashicorp/go-retryablehttp v0.7.1 github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.20.2 + github.com/onsi/gomega v1.25.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.13.0 github.com/stretchr/testify v1.8.0 @@ -38,21 +37,15 @@ require ( cloud.google.com/go/compute/metadata v0.2.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect 
- github.com/aws/aws-sdk-go v1.40.56 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/emicklei/go-restful/v3 v3.8.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect github.com/go-logr/zapr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.19.5 // indirect github.com/go-openapi/swag v0.19.14 // indirect - github.com/go-sql-driver/mysql v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect @@ -60,38 +53,28 @@ require ( github.com/google/go-github/v45 v45.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.1.0 // indirect - github.com/gruntwork-io/go-commons v0.8.0 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-multierror v1.1.0 // indirect github.com/imdario/mergo v0.3.12 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.6 // indirect - github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // 
indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/pquerna/otp v1.2.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.4.0 // indirect - github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect golang.org/x/crypto v0.0.0-20220824171710-5757bc0c5503 // indirect - golang.org/x/net v0.0.0-20221014081412-f15817d10f9b // indirect - golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/net v0.5.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/term v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/go.sum b/go.sum index bbf85b1816..fb48b8cd49 100644 --- a/go.sum +++ b/go.sum @@ -48,18 +48,12 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/aws/aws-sdk-go v1.40.56 h1:FM2yjR0UUYFzDTMx+mH9Vyw1k1EUUxsAFzk+BjkzANA= -github.com/aws/aws-sdk-go v1.40.56/go.mod 
h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= -github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 h1:5+NghM1Zred9Z078QEZtm28G/kfDfZN/92gkDlLwGVA= github.com/bradleyfalzon/ghinstallation/v2 v2.1.0/go.mod h1:Xg3xPRN5Mcq6GDqeUVhFbjEWMb4JHCyWEeeBGEYQoTU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -71,15 +65,11 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -91,16 +81,10 @@ github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 
h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU= -github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -127,8 +111,6 @@ github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -208,20 +190,11 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro= -github.com/gruntwork-io/go-commons v0.8.0/go.mod 
h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78= -github.com/gruntwork-io/terratest v0.40.24 h1:vxVi714rX+joBLrxBVnbMzSYQ2srIfXzjqvImHl6Rtk= -github.com/gruntwork-io/terratest v0.40.24/go.mod h1:JGeIGgLbxbG9/Oqm06z6YXVr76CfomdmLkV564qov+8= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -231,10 +204,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -264,19 +233,9 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= -github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg= -github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/moby/spdystream v0.2.0 
h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -297,19 +256,17 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= +github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.20.2 h1:8uQq0zMgLEfa0vRrrBgaJF2gyW9Da9BmfGV+OyUzfkY= -github.com/onsi/gomega v1.20.2/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= +github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok= -github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -336,10 +293,6 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -362,8 +315,6 @@ github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PK github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/teambition/rrule-go v1.8.0 h1:a/IX5s56hGkFF+nRlJUooZU/45OTeeldBGL29nDKIHw= github.com/teambition/rrule-go v1.8.0/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4= -github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= 
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -456,11 +407,10 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b h1:tvrvnPFcdzp294diPnrdZZZ8XUt2Tyj7svb7X52iDuU= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -485,7 +435,6 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -497,7 +446,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -527,11 +475,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 
h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -539,8 +488,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 8f266e01f6f8d5c0ee28c980ef97c06ac36910bf Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Mon, 23 Jan 2023 11:50:14 +0000 Subject: [PATCH 038/561] Refactor actions.Client with options to help extensibility (#2193) --- cmd/githubrunnerscalesetlistener/main.go | 8 +- github/actions/actions_server_test.go | 81 ++ github/actions/client.go | 172 ++-- github/actions/client_generate_jit_test.go | 63 +- github/actions/client_job_acquisition_test.go | 119 +-- .../client_runner_scale_set_message_test.go | 262 ++---- .../client_runner_scale_set_session_test.go | 167 ++-- .../actions/client_runner_scale_set_test.go | 870 ++++-------------- github/actions/client_runner_test.go | 180 ++-- github/actions/multi_client.go | 8 +- github/actions/multi_client_test.go | 9 +- 11 files changed, 648 insertions(+), 1291 deletions(-) create mode 100644 github/actions/actions_server_test.go diff --git a/cmd/githubrunnerscalesetlistener/main.go b/cmd/githubrunnerscalesetlistener/main.go index 2c9d71b273..0668459778 100644 --- a/cmd/githubrunnerscalesetlistener/main.go +++ b/cmd/githubrunnerscalesetlistener/main.go @@ -84,7 +84,13 @@ func run(rc RunnerScaleSetListenerConfig, logger logr.Logger) error { } } - actionsServiceClient, err := actions.NewClient(ctx, rc.ConfigureUrl, creds, fmt.Sprintf("actions-runner-controller/%s", build.Version), logger) + actionsServiceClient, err := actions.NewClient( + ctx, + rc.ConfigureUrl, + creds, + actions.WithUserAgent(fmt.Sprintf("actions-runner-controller/%s", build.Version)), + actions.WithLogger(logger), + ) if err != nil { return fmt.Errorf("failed to create an Actions Service client: %w", err) } diff --git a/github/actions/actions_server_test.go b/github/actions/actions_server_test.go new file mode 100644 index 0000000000..8b66e45ff6 --- /dev/null +++ b/github/actions/actions_server_test.go @@ -0,0 +1,81 @@ +package actions_test + +import ( + "net/http" 
+ "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/golang-jwt/jwt/v4" + "github.com/stretchr/testify/require" +) + +// newActionsServer returns a new httptest.Server that handles the +// authentication requests neeeded to create a new client. Any requests not +// made to the /actions/runners/registration-token or +// /actions/runner-registration endpoints will be handled by the provided +// handler. The returned server is started and will be automatically closed +// when the test ends. +func newActionsServer(t *testing.T, handler http.Handler) *actionsServer { + var u string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // handle getRunnerRegistrationToken + if strings.HasSuffix(r.URL.Path, "/runners/registration-token") { + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"token":"token"}`)) + return + } + + // handle getActionsServiceAdminConnection + if strings.HasSuffix(r.URL.Path, "/actions/runner-registration") { + claims := &jwt.RegisteredClaims{ + IssuedAt: jwt.NewNumericDate(time.Now().Add(-1 * time.Minute)), + ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Minute)), + Issuer: "123", + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) + privateKey, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(samplePrivateKey)) + require.NoError(t, err) + tokenString, err := token.SignedString(privateKey) + require.NoError(t, err) + w.Write([]byte(`{"url":"` + u + `","token":"` + tokenString + `"}`)) + return + } + + handler.ServeHTTP(w, r) + })) + + u = server.URL + + t.Cleanup(func() { + server.Close() + }) + + return &actionsServer{server} +} + +type actionsServer struct { + *httptest.Server +} + +func (s *actionsServer) configURLForOrg(org string) string { + return s.URL + "/" + org +} + +const samplePrivateKey = `-----BEGIN RSA PRIVATE KEY----- +MIICWgIBAAKBgHXfRT9cv9UY9fAAD4+1RshpfSSZe277urfEmPfX3/Og9zJYRk// +CZrJVD1CaBZDiIyQsNEzjta7r4UsqWdFOggiNN2E7ZTFQjMSaFkVgrzHqWuiaCBf 
+/BjbKPn4SMDmTzHvIe7Nel76hBdCaVgu6mYCW5jmuSH5qz/yR1U1J/WJAgMBAAEC +gYARWGWsSU3BYgbu5lNj5l0gKMXNmPhdAJYdbMTF0/KUu18k/XB7XSBgsre+vALt +I8r4RGKApoGif8P4aPYUyE8dqA1bh0X3Fj1TCz28qoUL5//dA+pigCRS20H7HM3C +ojoqF7+F+4F2sXmzFNd1NgY5RxFPYosTT7OnUiFuu2IisQJBALnMLe09LBnjuHXR +xxR65DDNxWPQLBjW3dL+ubLcwr7922l6ZIQsVjdeE0ItEUVRjjJ9/B/Jq9VJ/Lw4 +g9LCkkMCQQCiaM2f7nYmGivPo9hlAbq5lcGJ5CCYFfeeYzTxMqum7Mbqe4kk5lgb +X6gWd0Izg2nGdAEe/97DClO6VpKcPbpDAkBTR/JOJN1fvXMxXJaf13XxakrQMr+R +Yr6LlSInykyAz8lJvlLP7A+5QbHgN9NF/wh+GXqpxPwA3ukqdSqhjhWBAkBn6mDv +HPgR5xrzL6XM8y9TgaOlJAdK6HtYp6d/UOmN0+Butf6JUq07TphRT5tXNJVgemch +O5x/9UKfbrc+KyzbAkAo97TfFC+mZhU1N5fFelaRu4ikPxlp642KRUSkOh8GEkNf +jQ97eJWiWtDcsMUhcZgoB5ydHcFlrBIn6oBcpge5 +-----END RSA PRIVATE KEY-----` diff --git a/github/actions/client.go b/github/actions/client.go index 8e0290869e..f52d2b9542 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -7,6 +7,7 @@ import ( "encoding/json" "fmt" "io" + "log" "net/http" "net/url" "path" @@ -62,8 +63,8 @@ type Client struct { ActionsServiceAdminTokenExpiresAt *time.Time ActionsServiceURL *string - RetryMax *int - RetryWaitMax *time.Duration + retryMax int + retryWaitMax time.Duration creds *ActionsAuth githubConfigURL string @@ -71,14 +72,57 @@ type Client struct { userAgent string } -func NewClient(ctx context.Context, githubConfigURL string, creds *ActionsAuth, userAgent string, logger logr.Logger) (ActionsService, error) { +type ClientOption func(*Client) + +func WithUserAgent(userAgent string) ClientOption { + return func(c *Client) { + c.userAgent = userAgent + } +} + +func WithLogger(logger logr.Logger) ClientOption { + return func(c *Client) { + c.logger = logger + } +} + +func WithRetryMax(retryMax int) ClientOption { + return func(c *Client) { + c.retryMax = retryMax + } +} + +func WithRetryWaitMax(retryWaitMax time.Duration) ClientOption { + return func(c *Client) { + c.retryWaitMax = retryWaitMax + } +} + +func NewClient(ctx context.Context, githubConfigURL string, creds 
*ActionsAuth, options ...ClientOption) (ActionsService, error) { ac := &Client{ creds: creds, githubConfigURL: githubConfigURL, - logger: logger, - userAgent: userAgent, + logger: logr.Discard(), + + // retryablehttp defaults + retryMax: 4, + retryWaitMax: 30 * time.Second, + } + + for _, option := range options { + option(ac) } + retryClient := retryablehttp.NewClient() + + // TODO: this silences retryclient default logger, do we want to provide one + // instead? by default retryablehttp logs all requests to stderr + retryClient.Logger = log.New(io.Discard, "", log.LstdFlags) + + retryClient.RetryMax = ac.retryMax + retryClient.RetryWaitMax = ac.retryWaitMax + ac.Client = retryClient.StandardClient() + rt, err := ac.getRunnerRegistrationToken(ctx, githubConfigURL, *creds) if err != nil { return nil, fmt.Errorf("failed to get runner registration token: %w", err) @@ -121,9 +165,7 @@ func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName strin req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -165,9 +207,7 @@ func (c *Client) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -182,7 +222,6 @@ func (c *Client) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int return nil, err } return runnerScaleSet, nil - } func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*RunnerGroup, error) { @@ -204,9 +243,7 @@ func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) ( req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -260,51 +297,7 @@ func (c *Client) 
CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *Runne req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, ParseActionsErrorFromResponse(resp) - } - var createdRunnerScaleSet *RunnerScaleSet - err = unmarshalBody(resp, &createdRunnerScaleSet) - if err != nil { - return nil, err - } - return createdRunnerScaleSet, nil -} - -func (c *Client) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) { - u := fmt.Sprintf("%s/%s/%d?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) - - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) - } - - body, err := json.Marshal(runnerScaleSet) - if err != nil { - return nil, err - } - - req, err := http.NewRequestWithContext(ctx, http.MethodPut, u, bytes.NewBuffer(body)) - if err != nil { - return nil, err - } - - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -312,7 +305,6 @@ func (c *Client) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, if resp.StatusCode != http.StatusOK { return nil, ParseActionsErrorFromResponse(resp) } - var createdRunnerScaleSet *RunnerScaleSet err = unmarshalBody(resp, &createdRunnerScaleSet) if err != nil { @@ -340,9 +332,7 @@ func (c *Client) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int) req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != 
nil { return err } @@ -372,9 +362,7 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -425,9 +413,7 @@ func (c *Client) DeleteMessage(ctx context.Context, messageQueueUrl, messageQueu req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return err } @@ -497,9 +483,7 @@ func (c *Client) doSessionRequest(ctx context.Context, method, url string, reque req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return err } @@ -542,9 +526,7 @@ func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQ req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -581,9 +563,7 @@ func (c *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (* req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -629,9 +609,7 @@ func (c *Client) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting * req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -667,9 +645,7 @@ func (c *Client) GetRunner(ctx context.Context, runnerId int64) (*RunnerReferenc req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -705,9 +681,7 @@ func (c *Client) GetRunnerByName(ctx 
context.Context, runnerName string) (*Runne req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -752,9 +726,7 @@ func (c *Client) RemoveRunner(ctx context.Context, runnerId int64) error { req.Header.Set("User-Agent", c.userAgent) } - httpClient := c.getHTTPClient() - - resp, err := httpClient.Do(req) + resp, err := c.Do(req) if err != nil { return err } @@ -1012,24 +984,6 @@ func createJWTForGitHubApp(appAuth *GitHubAppAuth) (string, error) { return token.SignedString(privateKey) } -func (c *Client) getHTTPClient() *http.Client { - if c.Client != nil { - return c.Client - } - - retryClient := retryablehttp.NewClient() - - if c.RetryMax != nil { - retryClient.RetryMax = *c.RetryMax - } - - if c.RetryWaitMax != nil { - retryClient.RetryWaitMax = *c.RetryWaitMax - } - - return retryClient.StandardClient() -} - func unmarshalBody(response *http.Response, v interface{}) (err error) { if response != nil && response.Body != nil { var err error diff --git a/github/actions/client_generate_jit_test.go b/github/actions/client_generate_jit_test.go index 1b1d733047..cf594151b9 100644 --- a/github/actions/client_generate_jit_test.go +++ b/github/actions/client_generate_jit_test.go @@ -3,73 +3,60 @@ package actions_test import ( "context" "net/http" - "net/http/httptest" "testing" "time" "github.com/actions/actions-runner-controller/github/actions" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/go-retryablehttp" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestGenerateJitRunnerConfig(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } t.Run("Get JIT Config for Runner", func(t *testing.T) { - name := 
"Get JIT Config for Runner" want := &actions.RunnerScaleSetJitRunnerConfig{} response := []byte(`{"count":1,"value":[{"id":1,"name":"scale-set-name"}]}`) runnerSettings := &actions.RunnerScaleSetJitRunnerSetting{} - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(response) })) - defer s.Close() + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - - got, err := actionsClient.GenerateJitRunnerConfig(context.Background(), runnerSettings, 1) - if err != nil { - t.Fatalf("GenerateJitRunnerConfig got unexepected error, %v", err) - } - - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("GenerateJitRunnerConfig(%v) mismatch (-want +got):\n%s", name, diff) - } + got, err := client.GenerateJitRunnerConfig(ctx, runnerSettings, 1) + require.NoError(t, err) + assert.Equal(t, want, got) }) t.Run("Default retries on server error", func(t *testing.T) { runnerSettings := &actions.RunnerScaleSetJitRunnerSetting{} - retryClient := retryablehttp.NewClient() - retryClient.RetryWaitMax = 1 * time.Millisecond - retryClient.RetryMax = 1 - + retryMax := 1 actualRetry := 0 - expectedRetry := retryClient.RetryMax + 1 + expectedRetry := retryMax + 1 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - - 
_, _ = actionsClient.GenerateJitRunnerConfig(context.Background(), runnerSettings, 1) + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(1), + actions.WithRetryWaitMax(1*time.Millisecond), + ) + require.NoError(t, err) + + _, err = client.GenerateJitRunnerConfig(ctx, runnerSettings, 1) + assert.NotNil(t, err) assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) }) } diff --git a/github/actions/client_job_acquisition_test.go b/github/actions/client_job_acquisition_test.go index b7df3abb57..dfd0d58dad 100644 --- a/github/actions/client_job_acquisition_test.go +++ b/github/actions/client_job_acquisition_test.go @@ -3,22 +3,21 @@ package actions_test import ( "context" "net/http" - "net/http/httptest" "testing" "time" "github.com/actions/actions-runner-controller/github/actions" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/go-retryablehttp" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAcquireJobs(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } t.Run("Acquire Job", func(t *testing.T) { - name := "Acquire Job" - want := []int64{1} response := []byte(`{"value": [1]}`) @@ -28,24 +27,16 @@ func TestAcquireJobs(t *testing.T) { } requestIDs := want - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(response) })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - } - got, err := actionsClient.AcquireJobs(context.Background(), session.RunnerScaleSet.Id, 
session.MessageQueueAccessToken, requestIDs) - if err != nil { - t.Fatalf("CreateRunnerScaleSet got unexepected error, %v", err) - } + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("GetRunnerScaleSet(%v) mismatch (-want +got):\n%s", name, diff) - } + got, err := client.AcquireJobs(ctx, session.RunnerScaleSet.Id, session.MessageQueueAccessToken, requestIDs) + require.NoError(t, err) + assert.Equal(t, want, got) }) t.Run("Default retries on server error", func(t *testing.T) { @@ -55,90 +46,78 @@ func TestAcquireJobs(t *testing.T) { } var requestIDs []int64 = []int64{1} - retryClient := retryablehttp.NewClient() - retryClient.RetryWaitMax = 1 * time.Millisecond - retryClient.RetryMax = 1 - + retryMax := 1 actualRetry := 0 - expectedRetry := retryClient.RetryMax + 1 + expectedRetry := retryMax + 1 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - } - - _, _ = actionsClient.AcquireJobs(context.Background(), session.RunnerScaleSet.Id, session.MessageQueueAccessToken, requestIDs) + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(1*time.Millisecond), + ) + require.NoError(t, err) + + _, err = client.AcquireJobs(context.Background(), session.RunnerScaleSet.Id, session.MessageQueueAccessToken, requestIDs) + assert.NotNil(t, err) assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) }) } func TestGetAcquirableJobs(t 
*testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } t.Run("Acquire Job", func(t *testing.T) { - name := "Acquire Job" - want := &actions.AcquirableJobList{} response := []byte(`{"count": 0}`) runnerScaleSet := &actions.RunnerScaleSet{Id: 1} - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(response) })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - got, err := actionsClient.GetAcquirableJobs(context.Background(), runnerScaleSet.Id) - if err != nil { - t.Fatalf("GetAcquirableJobs got unexepected error, %v", err) - } + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("GetAcquirableJobs(%v) mismatch (-want +got):\n%s", name, diff) - } + got, err := client.GetAcquirableJobs(context.Background(), runnerScaleSet.Id) + require.NoError(t, err) + assert.Equal(t, want, got) }) t.Run("Default retries on server error", func(t *testing.T) { runnerScaleSet := &actions.RunnerScaleSet{Id: 1} - retryClient := retryablehttp.NewClient() - retryClient.RetryWaitMax = 1 * time.Millisecond - retryClient.RetryMax = 1 + retryMax := 1 actualRetry := 0 - expectedRetry := retryClient.RetryMax + 1 + expectedRetry := retryMax + 1 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - - 
httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - - _, _ = actionsClient.GetAcquirableJobs(context.Background(), runnerScaleSet.Id) + client, err := actions.NewClient( + context.Background(), + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(1*time.Millisecond), + ) + require.NoError(t, err) + + _, err = client.GetAcquirableJobs(context.Background(), runnerScaleSet.Id) + require.Error(t, err) assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) }) } diff --git a/github/actions/client_runner_scale_set_message_test.go b/github/actions/client_runner_scale_set_message_test.go index 2252f5a728..55e80267a4 100644 --- a/github/actions/client_runner_scale_set_message_test.go +++ b/github/actions/client_runner_scale_set_message_test.go @@ -5,18 +5,20 @@ import ( "encoding/json" "errors" "net/http" - "net/http/httptest" "testing" "time" "github.com/actions/actions-runner-controller/github/actions" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/go-retryablehttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestGetMessage(t *testing.T) { + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" runnerScaleSetMessage := &actions.RunnerScaleSetMessage{ MessageId: 1, @@ -26,89 +28,54 @@ func TestGetMessage(t *testing.T) { t.Run("Get Runner Scale Set Message", func(t *testing.T) { want := runnerScaleSetMessage response := []byte(`{"messageId":1,"messageType":"rssType"}`) - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) 
{ + s := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(response) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - - got, err := actionsClient.GetMessage(context.Background(), s.URL, token, 0) - if err != nil { - t.Fatalf("GetMessage got unexepected error, %v", err) - } + client, err := actions.NewClient(ctx, s.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("GetMessage mismatch (-want +got):\n%s", diff) - } + got, err := client.GetMessage(ctx, s.URL, token, 0) + require.NoError(t, err) + assert.Equal(t, want, got) }) t.Run("Default retries on server error", func(t *testing.T) { - retryClient := retryablehttp.NewClient() - retryClient.RetryWaitMax = 1 * time.Nanosecond - retryClient.RetryMax = 1 + retryMax := 1 actualRetry := 0 - expectedRetry := retryClient.RetryMax + 1 + expectedRetry := retryMax + 1 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - - _, _ = actionsClient.GetMessage(context.Background(), s.URL, token, 0) + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(1*time.Millisecond), + ) + require.NoError(t, err) + + _, err = client.GetMessage(ctx, server.URL, token, 0) + assert.NotNil(t, err) assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request 
but got: %v", actualRetry) }) - t.Run("Custom retries on server error", func(t *testing.T) { - actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusServiceUnavailable) - actualRetry++ - })) - defer s.Close() - retryMax := 1 - retryWaitMax := 1 * time.Nanosecond - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } - _, _ = actionsClient.GetMessage(context.Background(), s.URL, token, 0) - expectedRetry := retryMax + 1 - assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) - t.Run("Message token expired", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusUnauthorized) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.GetMessage(context.Background(), s.URL, token, 0) - if err == nil { - t.Fatalf("GetMessage did not get exepected error, ") - } + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetMessage(ctx, server.URL, token, 0) + require.NotNil(t, err) + var expectedErr *actions.MessageQueueTokenExpiredError require.True(t, errors.As(err, &expectedErr)) }, @@ -119,45 +86,38 @@ func TestGetMessage(t *testing.T) { Message: "Request returned status: 404 Not Found", StatusCode: 404, } - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNotFound) })) - 
defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.GetMessage(context.Background(), s.URL, token, 0) - if err == nil { - t.Fatalf("GetMessage did not get exepected error, ") - } - if diff := cmp.Diff(want.Error(), err.Error()); diff != "" { - t.Errorf("GetMessage mismatch (-want +got):\n%s", diff) - } - }, - ) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetMessage(ctx, server.URL, token, 0) + require.NotNil(t, err) + assert.Equal(t, want.Error(), err.Error()) + }) t.Run("Error when Content-Type is text/plain", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) w.Header().Set("Content-Type", "text/plain") })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.GetMessage(context.Background(), s.URL, token, 0) - if err == nil { - t.Fatalf("GetMessage did not get exepected error,") - } - }, - ) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetMessage(ctx, server.URL, token, 0) + assert.NotNil(t, err) + }) } func TestDeleteMessage(t *testing.T) { + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" runnerScaleSetMessage := &actions.RunnerScaleSetMessage{ MessageId: 1, @@ -165,105 +125,83 @@ func TestDeleteMessage(t *testing.T) { } 
t.Run("Delete existing message", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNoContent) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - err := actionsClient.DeleteMessage(context.Background(), s.URL, token, runnerScaleSetMessage.MessageId) - if err != nil { - t.Fatalf("DeleteMessage got unexepected error, %v", err) - } - }, - ) + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + err = client.DeleteMessage(ctx, server.URL, token, runnerScaleSetMessage.MessageId) + assert.Nil(t, err) + }) t.Run("Message token expired", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusUnauthorized) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - err := actionsClient.DeleteMessage(context.Background(), s.URL, token, 0) - if err == nil { - t.Fatalf("DeleteMessage did not get exepected error, ") - } + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + err = client.DeleteMessage(ctx, server.URL, token, 0) + require.NotNil(t, err) var expectedErr *actions.MessageQueueTokenExpiredError - require.True(t, errors.As(err, &expectedErr)) - }, - ) + assert.True(t, errors.As(err, &expectedErr)) + }) t.Run("Error when Content-Type is text/plain", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + 
server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) w.Header().Set("Content-Type", "text/plain") })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - err := actionsClient.DeleteMessage(context.Background(), s.URL, token, runnerScaleSetMessage.MessageId) - if err == nil { - t.Fatalf("DeleteMessage did not get exepected error") - } + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + err = client.DeleteMessage(ctx, server.URL, token, runnerScaleSetMessage.MessageId) + require.NotNil(t, err) var expectedErr *actions.ActionsError - require.True(t, errors.As(err, &expectedErr)) + assert.True(t, errors.As(err, &expectedErr)) }, ) t.Run("Default retries on server error", func(t *testing.T) { actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - retryClient := retryablehttp.NewClient() + retryMax := 1 - retryClient.RetryWaitMax = time.Nanosecond - retryClient.RetryMax = retryMax - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _ = actionsClient.DeleteMessage(context.Background(), s.URL, token, runnerScaleSetMessage.MessageId) + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(1*time.Nanosecond), + ) + require.NoError(t, err) + err = client.DeleteMessage(ctx, server.URL, token, runnerScaleSetMessage.MessageId) + 
assert.NotNil(t, err) expectedRetry := retryMax + 1 assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) + }) t.Run("No message found", func(t *testing.T) { want := (*actions.RunnerScaleSetMessage)(nil) rsl, err := json.Marshal(want) - if err != nil { - t.Fatalf("%v", err) - } - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + require.NoError(t, err) + + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(rsl) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - err = actionsClient.DeleteMessage(context.Background(), s.URL, token, runnerScaleSetMessage.MessageId+1) + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + err = client.DeleteMessage(ctx, server.URL, token, runnerScaleSetMessage.MessageId+1) var expectedErr *actions.ActionsError require.True(t, errors.As(err, &expectedErr)) - }, - ) + }) } diff --git a/github/actions/client_runner_scale_set_session_test.go b/github/actions/client_runner_scale_set_session_test.go index e3fc31936a..f5fbceb76b 100644 --- a/github/actions/client_runner_scale_set_session_test.go +++ b/github/actions/client_runner_scale_set_session_test.go @@ -4,19 +4,22 @@ import ( "context" "errors" "net/http" - "net/http/httptest" "testing" "time" "github.com/actions/actions-runner-controller/github/actions" - "github.com/google/go-cmp/cmp" "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCreateMessageSession(t *testing.T) { + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + t.Run("CreateMessageSession unmarshals correctly", func(t *testing.T) { - token := 
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" owner := "foo" runnerScaleSet := actions.RunnerScaleSet{ Id: 1, @@ -35,7 +38,7 @@ func TestCreateMessageSession(t *testing.T) { MessageQueueAccessToken: "fake.jwt.here", } - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { resp := []byte(`{ "ownerName": "foo", "runnerScaleSet": { @@ -47,31 +50,16 @@ func TestCreateMessageSession(t *testing.T) { }`) w.Write(resp) })) - defer srv.Close() - - retryMax := 1 - retryWaitMax := 1 * time.Microsecond - - actionsClient := actions.Client{ - ActionsServiceURL: &srv.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } - got, err := actionsClient.CreateMessageSession(context.Background(), runnerScaleSet.Id, owner) - if err != nil { - t.Fatalf("CreateMessageSession got unexpected error: %v", err) - } + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if diff := cmp.Diff(got, want); diff != "" { - t.Fatalf("CreateMessageSession got unexpected diff: -want +got: %v", diff) - } + got, err := client.CreateMessageSession(ctx, runnerScaleSet.Id, owner) + require.NoError(t, err) + assert.Equal(t, want, got) }) t.Run("CreateMessageSession unmarshals errors into ActionsError", func(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" owner := "foo" runnerScaleSet := actions.RunnerScaleSet{ Id: 1, @@ -86,44 +74,32 @@ func TestCreateMessageSession(t *testing.T) { StatusCode: http.StatusBadRequest, } - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ 
*http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusBadRequest) resp := []byte(`{"typeName": "CSharpExceptionNameHere","message": "could not do something"}`) w.Write(resp) })) - defer srv.Close() - retryMax := 1 - retryWaitMax := 1 * time.Microsecond + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - actionsClient := actions.Client{ - ActionsServiceURL: &srv.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } - - got, err := actionsClient.CreateMessageSession(context.Background(), runnerScaleSet.Id, owner) - if err == nil { - t.Fatalf("CreateMessageSession did not get expected error: %v", got) - } + _, err = client.CreateMessageSession(ctx, runnerScaleSet.Id, owner) + require.NotNil(t, err) errorTypeForComparison := &actions.ActionsError{} - if isActionsError := errors.As(err, &errorTypeForComparison); !isActionsError { - t.Fatalf("CreateMessageSession expected to be able to parse the error into ActionsError type: %v", err) - } + assert.True( + t, + errors.As(err, &errorTypeForComparison), + "CreateMessageSession expected to be able to parse the error into ActionsError type: %v", + err, + ) gotErr := err.(*actions.ActionsError) - - if diff := cmp.Diff(want, gotErr); diff != "" { - t.Fatalf("CreateMessageSession got unexpected diff: -want +got: %v", diff) - } + assert.Equal(t, want, gotErr) }) t.Run("CreateMessageSession call is retried the correct amount of times", func(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" owner := "foo" runnerScaleSet := actions.RunnerScaleSet{ Id: 1, @@ -133,37 +109,38 @@ func TestCreateMessageSession(t *testing.T) 
{ } gotRetries := 0 - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) gotRetries++ })) - defer srv.Close() retryMax := 3 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } + retryWaitMax := 1 * time.Microsecond wantRetries := retryMax + 1 - actionsClient := actions.Client{ - ActionsServiceURL: &srv.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } - - _, _ = actionsClient.CreateMessageSession(context.Background(), runnerScaleSet.Id, owner) - + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(retryWaitMax), + ) + require.NoError(t, err) + + _, err = client.CreateMessageSession(ctx, runnerScaleSet.Id, owner) + assert.NotNil(t, err) assert.Equalf(t, gotRetries, wantRetries, "CreateMessageSession got unexpected retry count: got=%v, want=%v", gotRetries, wantRetries) }) } func TestDeleteMessageSession(t *testing.T) { + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + t.Run("DeleteMessageSession call is retried the correct amount of times", func(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" runnerScaleSet := actions.RunnerScaleSet{ Id: 1, Name: "ScaleSet", @@ -172,39 +149,40 @@ func TestDeleteMessageSession(t *testing.T) { } gotRetries := 0 - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) gotRetries++ 
})) - defer srv.Close() retryMax := 3 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } + retryWaitMax := 1 * time.Microsecond wantRetries := retryMax + 1 - actionsClient := actions.Client{ - ActionsServiceURL: &srv.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(retryWaitMax), + ) + require.NoError(t, err) sessionId := uuid.New() - _ = actionsClient.DeleteMessageSession(context.Background(), runnerScaleSet.Id, &sessionId) - + err = client.DeleteMessageSession(ctx, runnerScaleSet.Id, &sessionId) + assert.NotNil(t, err) assert.Equalf(t, gotRetries, wantRetries, "CreateMessageSession got unexpected retry count: got=%v, want=%v", gotRetries, wantRetries) }) } func TestRefreshMessageSession(t *testing.T) { + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + t.Run("RefreshMessageSession call is retried the correct amount of times", func(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" runnerScaleSet := actions.RunnerScaleSet{ Id: 1, Name: "ScaleSet", @@ -213,32 +191,29 @@ func TestRefreshMessageSession(t *testing.T) { } gotRetries := 0 - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusInternalServerError) gotRetries++ })) - defer srv.Close() retryMax := 3 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } + retryWaitMax := 1 * time.Microsecond wantRetries := retryMax + 1 - actionsClient := actions.Client{ - 
ActionsServiceURL: &srv.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(retryWaitMax), + ) + require.NoError(t, err) sessionId := uuid.New() - _, _ = actionsClient.RefreshMessageSession(context.Background(), runnerScaleSet.Id, &sessionId) - + _, err = client.RefreshMessageSession(context.Background(), runnerScaleSet.Id, &sessionId) + assert.NotNil(t, err) assert.Equalf(t, gotRetries, wantRetries, "CreateMessageSession got unexpected retry count: got=%v, want=%v", gotRetries, wantRetries) }) } diff --git a/github/actions/client_runner_scale_set_test.go b/github/actions/client_runner_scale_set_test.go index fb4ba2c532..e9d86e2d8b 100644 --- a/github/actions/client_runner_scale_set_test.go +++ b/github/actions/client_runner_scale_set_test.go @@ -6,853 +6,333 @@ import ( "errors" "fmt" "net/http" - "net/http/httptest" "net/url" "testing" "time" "github.com/actions/actions-runner-controller/github/actions" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/go-retryablehttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestGetRunnerScaleSet(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + scaleSetName := "ScaleSet" runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: scaleSetName} t.Run("Get existing scale set", func(t *testing.T) { want := &runnerScaleSet runnerScaleSetsResp := []byte(`{"count":1,"value":[{"id":1,"name":"ScaleSet"}]}`) - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, 
http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(runnerScaleSetsResp) })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - got, err := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) - if err != nil { - t.Fatalf("CreateRunnerScaleSet got unexepected error, %v", err) - } - - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("GetRunnerScaleSet(%v) mismatch (-want +got):\n%s", scaleSetName, diff) - } - }, - ) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + got, err := client.GetRunnerScaleSet(ctx, scaleSetName) + require.NoError(t, err) + assert.Equal(t, want, got) + }) t.Run("GetRunnerScaleSet calls correct url", func(t *testing.T) { runnerScaleSetsResp := []byte(`{"count":1,"value":[{"id":1,"name":"ScaleSet"}]}`) url := url.URL{} - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write(runnerScaleSetsResp) url = *r.URL })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) - if err != nil { - t.Fatalf("CreateRunnerScaleSet got unexepected error, %v", err) - } + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetRunnerScaleSet(ctx, scaleSetName) + require.NoError(t, err) u := url.String() expectedUrl := fmt.Sprintf("/_apis/runtime/runnerscalesets?name=%s&api-version=6.0-preview", scaleSetName) assert.Equal(t, expectedUrl, u) - - }, - ) + }) t.Run("Status code not found", func(t *testing.T) { - s := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNotFound) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) - if err == nil { - t.Fatalf("GetRunnerScaleSet did not get exepected error, ") - } - }, - ) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetRunnerScaleSet(ctx, scaleSetName) + assert.NotNil(t, err) + }) t.Run("Error when Content-Type is text/plain", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) w.Header().Set("Content-Type", "text/plain") })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) - if err == nil { - t.Fatalf("GetRunnerScaleSet did not get exepected error,") - } - }, - ) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetRunnerScaleSet(ctx, scaleSetName) + assert.NotNil(t, err) + }) t.Run("Default retries on server error", func(t *testing.T) { actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - 
retryClient := retryablehttp.NewClient() - retryMax := 1 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } - retryClient.RetryWaitMax = retryWaitMax - retryClient.RetryMax = retryMax - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, _ = actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) - expectedRetry := retryMax + 1 - assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) - t.Run("Custom retries on server error", func(t *testing.T) { - actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusServiceUnavailable) - actualRetry++ - })) - defer s.Close() retryMax := 1 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } - _, _ = actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) + retryWaitMax := 1 * time.Microsecond + + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(retryWaitMax), + ) + require.NoError(t, err) + + _, err = client.GetRunnerScaleSet(ctx, scaleSetName) + assert.NotNil(t, err) expectedRetry := retryMax + 1 assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) + }) t.Run("RunnerScaleSet count is zero", func(t *testing.T) { want := (*actions.RunnerScaleSet)(nil) runnerScaleSetsResp := []byte(`{"count":0,"value":[{"id":1,"name":"ScaleSet"}]}`) 
- s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(runnerScaleSetsResp) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - got, _ := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("GetRunnerScaleSet(%v) mismatch (-want +got):\n%s", scaleSetName, diff) - } - - }, - ) + got, err := client.GetRunnerScaleSet(ctx, scaleSetName) + require.NoError(t, err) + assert.Equal(t, want, got) + }) t.Run("Multiple runner scale sets found", func(t *testing.T) { wantErr := fmt.Errorf("multiple runner scale sets found with name %s", scaleSetName) runnerScaleSetsResp := []byte(`{"count":2,"value":[{"id":1,"name":"ScaleSet"}]}`) - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(runnerScaleSetsResp) })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.GetRunnerScaleSet(context.Background(), scaleSetName) - if err == nil { - t.Fatalf("GetRunnerScaleSet did not get exepected error, %v", wantErr) - } + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if diff := cmp.Diff(wantErr.Error(), err.Error()); diff != "" { - t.Errorf("GetRunnerScaleSet(%v) mismatch (-want +got):\n%s", scaleSetName, diff) - } - - }, - ) + _, err = client.GetRunnerScaleSet(ctx, scaleSetName) + require.NotNil(t, err) + 
assert.Equal(t, wantErr.Error(), err.Error()) + }) } func TestGetRunnerScaleSetById(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + scaleSetCreationDateTime := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: "ScaleSet", CreatedOn: scaleSetCreationDateTime, RunnerSetting: actions.RunnerSetting{}} t.Run("Get existing scale set by Id", func(t *testing.T) { want := &runnerScaleSet rsl, err := json.Marshal(want) - if err != nil { - t.Fatalf("%v", err) - } - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + require.NoError(t, err) + sservere := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(rsl) })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - got, err := actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) - if err != nil { - t.Fatalf("GetRunnerScaleSetById got unexepected error, %v", err) - } - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("GetRunnerScaleSetById(%d) mismatch (-want +got):\n%s", runnerScaleSet.Id, diff) - } - }, - ) + + client, err := actions.NewClient(ctx, sservere.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + got, err := client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) + require.NoError(t, err) + assert.Equal(t, want, got) + }) t.Run("GetRunnerScaleSetById calls correct url", func(t *testing.T) { rsl, err := json.Marshal(&runnerScaleSet) - if err != nil { - t.Fatalf("%v", err) - } + require.NoError(t, err) + url := url.URL{} - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write(rsl) url = *r.URL })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err = actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) - if err != nil { - t.Fatalf("GetRunnerScaleSetById got unexepected error, %v", err) - } + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) + require.NoError(t, err) u := url.String() expectedUrl := fmt.Sprintf("/_apis/runtime/runnerscalesets/%d?api-version=6.0-preview", runnerScaleSet.Id) assert.Equal(t, expectedUrl, u) - - }, - ) + }) t.Run("Status code not found", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNotFound) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) - if err == nil { - t.Fatalf("GetRunnerScaleSetById did not get exepected error, ") - } - }, - ) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) + assert.NotNil(t, err) + }) t.Run("Error when Content-Type is text/plain", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) 
w.Header().Set("Content-Type", "text/plain") })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) - if err == nil { - t.Fatalf("GetRunnerScaleSetById did not get exepected error,") - } - }, - ) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) + assert.NotNil(t, err) + }) t.Run("Default retries on server error", func(t *testing.T) { actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - retryClient := retryablehttp.NewClient() - retryMax := 1 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } - retryClient.RetryWaitMax = retryWaitMax - retryClient.RetryMax = retryMax - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, _ = actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) - expectedRetry := retryMax + 1 - assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) - t.Run("Custom retries on server error", func(t *testing.T) { - actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusServiceUnavailable) - actualRetry++ - })) - defer s.Close() retryMax := 1 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - 
t.Fatalf("%v", err) - } - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } - _, _ = actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) + retryWaitMax := 1 * time.Microsecond + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(retryWaitMax), + ) + require.NoError(t, err) + + _, err = client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) + require.NotNil(t, err) expectedRetry := retryMax + 1 assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) + }) t.Run("No RunnerScaleSet found", func(t *testing.T) { want := (*actions.RunnerScaleSet)(nil) rsl, err := json.Marshal(want) - if err != nil { - t.Fatalf("%v", err) - } - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + require.NoError(t, err) + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(rsl) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - got, _ := actionsClient.GetRunnerScaleSetById(context.Background(), runnerScaleSet.Id) + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("GetRunnerScaleSetById(%v) mismatch (-want +got):\n%s", runnerScaleSet.Id, diff) - } - - }, - ) + got, err := client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) + require.NoError(t, err) + assert.Equal(t, want, got) + }) } func TestCreateRunnerScaleSet(t *testing.T) { - token := 
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + scaleSetCreationDateTime := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: "ScaleSet", CreatedOn: scaleSetCreationDateTime, RunnerSetting: actions.RunnerSetting{}} t.Run("Create runner scale set", func(t *testing.T) { want := &runnerScaleSet rsl, err := json.Marshal(want) - if err != nil { - t.Fatalf("%v", err) - } - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + require.NoError(t, err) + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(rsl) })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - got, err := actionsClient.CreateRunnerScaleSet(context.Background(), &runnerScaleSet) - if err != nil { - t.Fatalf("CreateRunnerScaleSet got exepected error, %v", err) - } - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("CreateRunnerScaleSet(%d) mismatch (-want +got):\n%s", runnerScaleSet.Id, diff) - } - }, - ) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + got, err := client.CreateRunnerScaleSet(ctx, &runnerScaleSet) + require.NoError(t, err) + assert.Equal(t, want, got) + }) t.Run("CreateRunnerScaleSet calls correct url", func(t *testing.T) { rsl, err := json.Marshal(&runnerScaleSet) - if err != nil { - t.Fatalf("%v", err) - } + require.NoError(t, err) url := url.URL{} - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write(rsl) url = *r.URL })) - 
defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err = actionsClient.CreateRunnerScaleSet(context.Background(), &runnerScaleSet) - if err != nil { - t.Fatalf("CreateRunnerScaleSet got unexepected error, %v", err) - } - - u := url.String() - expectedUrl := "/_apis/runtime/runnerscalesets?api-version=6.0-preview" - assert.Equal(t, expectedUrl, u) - }, - ) + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - t.Run("Error when Content-Type is text/plain", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusBadRequest) - w.Header().Set("Content-Type", "text/plain") - })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.CreateRunnerScaleSet(context.Background(), &runnerScaleSet) - if err == nil { - t.Fatalf("CreateRunnerScaleSet did not get exepected error, %v", &actions.ActionsError{}) - } - var expectedErr *actions.ActionsError - require.True(t, errors.As(err, &expectedErr)) - }, - ) - - t.Run("Default retries on server error", func(t *testing.T) { - actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusServiceUnavailable) - actualRetry++ - })) - defer s.Close() - retryClient := retryablehttp.NewClient() - retryMax := 1 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } - retryClient.RetryMax = retryMax - retryClient.RetryWaitMax = retryWaitMax - - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - 
ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, _ = actionsClient.CreateRunnerScaleSet(context.Background(), &runnerScaleSet) - expectedRetry := retryMax + 1 - assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) - - t.Run("Custom retries on server error", func(t *testing.T) { - actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusServiceUnavailable) - actualRetry++ - })) - defer s.Close() - retryMax := 1 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } - _, _ = actionsClient.CreateRunnerScaleSet(context.Background(), &runnerScaleSet) - expectedRetry := retryMax + 1 - assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) -} - -func TestUpdateRunnerScaleSet(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" - scaleSetCreationDateTime := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) - runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: "ScaleSet", CreatedOn: scaleSetCreationDateTime, RunnerSetting: actions.RunnerSetting{}} - - t.Run("Update existing scale set", func(t *testing.T) { - want := &runnerScaleSet - rsl, err := json.Marshal(want) - if err != nil { - t.Fatalf("%v", err) - } - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.Write(rsl) - })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - 
ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - got, err := actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, want) - if err != nil { - t.Fatalf("UpdateRunnerScaleSet got exepected error, %v", err) - } - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("UpdateRunnerScaleSet(%d) mismatch (-want +got):\n%s", runnerScaleSet.Id, diff) - } - }, - ) - - t.Run("UpdateRunnerScaleSet calls correct url", func(t *testing.T) { - rsl, err := json.Marshal(&runnerScaleSet) - if err != nil { - t.Fatalf("%v", err) - } - url := url.URL{} - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write(rsl) - url = *r.URL - })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err = actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) - if err != nil { - t.Fatalf("UpdateRunnerScaleSet got unexepected error, %v", err) - } + _, err = client.CreateRunnerScaleSet(ctx, &runnerScaleSet) + require.NoError(t, err) u := url.String() - expectedUrl := fmt.Sprintf("/_apis/runtime/runnerscalesets/%d?api-version=6.0-preview", runnerScaleSet.Id) + expectedUrl := "/_apis/runtime/runnerscalesets?api-version=6.0-preview" assert.Equal(t, expectedUrl, u) - - }, - ) - - t.Run("Status code not found", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusNotFound) - })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) - if err == nil { - t.Fatalf("UpdateRunnerScaleSet did not get exepected error,") - } - var expectedErr *actions.ActionsError - 
require.True(t, errors.As(err, &expectedErr)) - }, - ) + }) t.Run("Error when Content-Type is text/plain", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusBadRequest) w.Header().Set("Content-Type", "text/plain") })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, err := actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) - if err == nil { - t.Fatalf("UpdateRunnerScaleSet did not get exepected error") - } - var expectedErr *actions.ActionsError - require.True(t, errors.As(err, &expectedErr)) - }, - ) - - t.Run("Default retries on server error", func(t *testing.T) { - actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusServiceUnavailable) - actualRetry++ - })) - defer s.Close() - retryClient := retryablehttp.NewClient() - retryMax := 1 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } - retryClient.RetryWaitMax = retryWaitMax - retryClient.RetryMax = retryMax - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _, _ = actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) - expectedRetry := retryMax + 1 - assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) - t.Run("Custom retries on server error", func(t *testing.T) { - actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ 
*http.Request) { - w.WriteHeader(http.StatusServiceUnavailable) - actualRetry++ - })) - defer s.Close() - retryMax := 1 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } - _, _ = actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) - expectedRetry := retryMax + 1 - assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - t.Run("No RunnerScaleSet found", func(t *testing.T) { - want := (*actions.RunnerScaleSet)(nil) - rsl, err := json.Marshal(want) - if err != nil { - t.Fatalf("%v", err) - } - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.Write(rsl) - })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - got, err := actionsClient.UpdateRunnerScaleSet(context.Background(), runnerScaleSet.Id, &runnerScaleSet) - if err != nil { - t.Fatalf("UpdateRunnerScaleSet got unexepected error, %v", err) - } - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("UpdateRunnerScaleSet(%v) mismatch (-want +got):\n%s", runnerScaleSet.Id, diff) - } - - }, - ) -} - -func TestDeleteRunnerScaleSet(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" - scaleSetCreationDateTime := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) - runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: "ScaleSet", CreatedOn: 
scaleSetCreationDateTime, RunnerSetting: actions.RunnerSetting{}} - - t.Run("Delete existing scale set", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusNoContent) - })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - err := actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) - if err != nil { - t.Fatalf("DeleteRunnerScaleSet got unexepected error, %v", err) - } - }, - ) - - t.Run("DeleteRunnerScaleSet calls correct url", func(t *testing.T) { - url := url.URL{} - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNoContent) - url = *r.URL - })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - err := actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) - if err != nil { - t.Fatalf("DeleteRunnerScaleSet got unexepected error, %v", err) - } - - u := url.String() - expectedUrl := fmt.Sprintf("/_apis/runtime/runnerscalesets/%d?api-version=6.0-preview", runnerScaleSet.Id) - assert.Equal(t, expectedUrl, u) - - }, - ) - - t.Run("Status code not found", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusNotFound) - })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - err := actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) - if err == nil { - t.Fatalf("DeleteRunnerScaleSet did not get exepected error, ") - } + _, err = client.CreateRunnerScaleSet(ctx, &runnerScaleSet) + 
require.NotNil(t, err) var expectedErr *actions.ActionsError - require.True(t, errors.As(err, &expectedErr)) - }, - ) - - t.Run("Error when Content-Type is text/plain", func(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusBadRequest) - w.Header().Set("Content-Type", "text/plain") - })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - err := actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) - if err == nil { - t.Fatalf("DeleteRunnerScaleSet did not get exepected error") - } - var expectedErr *actions.ActionsError - require.True(t, errors.As(err, &expectedErr)) - }, - ) + assert.True(t, errors.As(err, &expectedErr)) + }) t.Run("Default retries on server error", func(t *testing.T) { actualRetry := 0 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - retryClient := retryablehttp.NewClient() - retryMax := 1 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } - retryClient.RetryWaitMax = retryWaitMax - retryClient.RetryMax = retryMax - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - _ = actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) - expectedRetry := retryMax + 1 - assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) - t.Run("Custom retries on server error", func(t *testing.T) { - actualRetry := 0 - s := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusServiceUnavailable) - actualRetry++ - })) - defer s.Close() retryMax := 1 - retryWaitMax, err := time.ParseDuration("1µs") - if err != nil { - t.Fatalf("%v", err) - } - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - RetryMax: &retryMax, - RetryWaitMax: &retryWaitMax, - } - _ = actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) + retryWaitMax := 1 * time.Microsecond + + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(retryWaitMax), + ) + require.NoError(t, err) + + _, err = client.CreateRunnerScaleSet(ctx, &runnerScaleSet) + require.NotNil(t, err) expectedRetry := retryMax + 1 assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) - }, - ) - - t.Run("No RunnerScaleSet found", func(t *testing.T) { - want := (*actions.RunnerScaleSet)(nil) - rsl, err := json.Marshal(want) - if err != nil { - t.Fatalf("%v", err) - } - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.Write(rsl) - })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - err = actionsClient.DeleteRunnerScaleSet(context.Background(), runnerScaleSet.Id) - var expectedErr *actions.ActionsError - require.True(t, errors.As(err, &expectedErr)) - }, - ) + }) } diff --git a/github/actions/client_runner_test.go b/github/actions/client_runner_test.go index a8184b57b4..9406425adc 100644 --- a/github/actions/client_runner_test.go +++ b/github/actions/client_runner_test.go @@ -3,23 +3,21 @@ package actions_test import ( "context" "net/http" - 
"net/http/httptest" "testing" "time" "github.com/actions/actions-runner-controller/github/actions" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/go-retryablehttp" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -var tokenExpireAt = time.Now().Add(10 * time.Minute) - func TestGetRunner(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } t.Run("Get Runner", func(t *testing.T) { - name := "Get Runner" var runnerID int64 = 1 want := &actions.RunnerReference{ Id: int(runnerID), @@ -27,59 +25,45 @@ func TestGetRunner(t *testing.T) { } response := []byte(`{"id": 1, "name": "self-hosted-ubuntu"}`) - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write(response) })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - got, err := actionsClient.GetRunner(context.Background(), runnerID) - if err != nil { - t.Fatalf("GetRunner got unexepected error, %v", err) - } + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("GetRunner(%v) mismatch (-want +got):\n%s", name, diff) - } + got, err := client.GetRunner(ctx, runnerID) + require.NoError(t, err) + assert.Equal(t, want, got) }) t.Run("Default retries on server error", func(t *testing.T) { var runnerID int64 = 1 - retryClient := retryablehttp.NewClient() - retryClient.RetryWaitMax = 1 * time.Millisecond - retryClient.RetryMax = 1 + retryWaitMax := 1 * time.Millisecond + retryMax := 1 actualRetry := 0 - 
expectedRetry := retryClient.RetryMax + 1 + expectedRetry := retryMax + 1 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - - _, _ = actionsClient.GetRunner(context.Background(), runnerID) + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), actions.WithRetryWaitMax(retryWaitMax)) + require.NoError(t, err) + _, err = client.GetRunner(ctx, runnerID) + require.Error(t, err) assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) }) } func TestGetRunnerByName(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } t.Run("Get Runner by Name", func(t *testing.T) { var runnerID int64 = 1 @@ -90,130 +74,102 @@ func TestGetRunnerByName(t *testing.T) { } response := []byte(`{"count": 1, "value": [{"id": 1, "name": "self-hosted-ubuntu"}]}`) - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write(response) })) - defer s.Close() - - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - got, err := actionsClient.GetRunnerByName(context.Background(), runnerName) - if err != nil { 
- t.Fatalf("GetRunnerByName got unexepected error, %v", err) - } + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("GetRunnerByName(%v) mismatch (-want +got):\n%s", runnerName, diff) - } + got, err := client.GetRunnerByName(ctx, runnerName) + require.NoError(t, err) + assert.Equal(t, want, got) }) t.Run("Get Runner by name with not exist runner", func(t *testing.T) { var runnerName string = "self-hosted-ubuntu" response := []byte(`{"count": 0, "value": []}`) - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write(response) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - - got, err := actionsClient.GetRunnerByName(context.Background(), runnerName) - if err != nil { - t.Fatalf("GetRunnerByName got unexepected error, %v", err) - } + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if diff := cmp.Diff((*actions.RunnerReference)(nil), got); diff != "" { - t.Errorf("GetRunnerByName(%v) mismatch (-want +got):\n%s", runnerName, diff) - } + got, err := client.GetRunnerByName(ctx, runnerName) + require.NoError(t, err) + assert.Nil(t, got) }) t.Run("Default retries on server error", func(t *testing.T) { var runnerName string = "self-hosted-ubuntu" - retryClient := retryablehttp.NewClient() - retryClient.RetryWaitMax = 1 * time.Millisecond - retryClient.RetryMax = 1 + + retryWaitMax := 1 * time.Millisecond + retryMax := 1 actualRetry := 0 - expectedRetry := retryClient.RetryMax + 1 + expectedRetry := retryMax + 1 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + server := newActionsServer(t, 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - - _, _ = actionsClient.GetRunnerByName(context.Background(), runnerName) + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), actions.WithRetryWaitMax(retryWaitMax)) + require.NoError(t, err) + _, err = client.GetRunnerByName(ctx, runnerName) + require.Error(t, err) assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) }) } func TestDeleteRunner(t *testing.T) { - token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } t.Run("Delete Runner", func(t *testing.T) { var runnerID int64 = 1 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNoContent) })) - defer s.Close() - actionsClient := actions.Client{ - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) - if err := actionsClient.RemoveRunner(context.Background(), runnerID); err != nil { - t.Fatalf("RemoveRunner got unexepected error, %v", err) - } + err = client.RemoveRunner(ctx, runnerID) + assert.NoError(t, err) }) t.Run("Default retries on server error", func(t *testing.T) { var runnerID int64 = 1 - 
retryClient := retryablehttp.NewClient() - retryClient.RetryWaitMax = 1 * time.Millisecond - retryClient.RetryMax = 1 + retryWaitMax := 1 * time.Millisecond + retryMax := 1 actualRetry := 0 - expectedRetry := retryClient.RetryMax + 1 + expectedRetry := retryMax + 1 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) - defer s.Close() - - httpClient := retryClient.StandardClient() - actionsClient := actions.Client{ - Client: httpClient, - ActionsServiceURL: &s.URL, - ActionsServiceAdminToken: &token, - ActionsServiceAdminTokenExpiresAt: &tokenExpireAt, - } - - _ = actionsClient.RemoveRunner(context.Background(), runnerID) + client, err := actions.NewClient( + ctx, + server.configURLForOrg("my-org"), + auth, + actions.WithRetryMax(retryMax), + actions.WithRetryWaitMax(retryWaitMax), + ) + require.NoError(t, err) + + err = client.RemoveRunner(ctx, runnerID) + require.Error(t, err) assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) }) } diff --git a/github/actions/multi_client.go b/github/actions/multi_client.go index 1d1aad1dfa..c7a53b74bc 100644 --- a/github/actions/multi_client.go +++ b/github/actions/multi_client.go @@ -104,7 +104,13 @@ func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, m.logger.Info("creating new client", "githubConfigURL", githubConfigURL, "namespace", namespace) - client, err := NewClient(ctx, githubConfigURL, &creds, m.userAgent, m.logger) + client, err := NewClient( + ctx, + githubConfigURL, + &creds, + WithUserAgent(m.userAgent), + WithLogger(m.logger), + ) if err != nil { return nil, err } diff --git a/github/actions/multi_client_test.go b/github/actions/multi_client_test.go index 11aeb7fd21..fb4a64dbe7 100644 --- a/github/actions/multi_client_test.go 
+++ b/github/actions/multi_client_test.go @@ -6,20 +6,15 @@ import ( "fmt" "net/http" "net/http/httptest" - "os" "strings" "testing" "time" - "github.com/actions/actions-runner-controller/logging" + "github.com/go-logr/logr" ) func TestAddClient(t *testing.T) { - logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err) - os.Exit(1) - } + logger := logr.Discard() multiClient := NewMultiClient("test-user-agent", logger).(*multiClient) ctx := context.Background() From 2a7540aad37449e2b03b07459018f2276985739a Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Mon, 23 Jan 2023 17:03:01 +0100 Subject: [PATCH 039/561] Renaming autoScaling to autoscaling in tests matching the convention (#2201) --- .../autoscalinglistener_controller_test.go | 138 +++++++++--------- .../autoscalingrunnerset_controller_test.go | 134 ++++++++--------- .../ephemeralrunner_controller_test.go | 33 ++--- .../ephemeralrunnerset_controller_test.go | 14 +- go.mod | 17 +++ go.sum | 52 +++++++ 6 files changed, 226 insertions(+), 162 deletions(-) diff --git a/controllers/actions.github.com/autoscalinglistener_controller_test.go b/controllers/actions.github.com/autoscalinglistener_controller_test.go index 839497cdc2..b16fd7c831 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller_test.go +++ b/controllers/actions.github.com/autoscalinglistener_controller_test.go @@ -19,35 +19,35 @@ import ( ) const ( - autoScalingListenerTestTimeout = time.Second * 5 - autoScalingListenerTestInterval = time.Millisecond * 250 - autoScalingListenerTestGitHubToken = "gh_token" + autoscalingListenerTestTimeout = time.Second * 5 + autoscalingListenerTestInterval = time.Millisecond * 250 + autoscalingListenerTestGitHubToken = "gh_token" ) var _ = Describe("Test AutoScalingListener controller", func() { var ctx context.Context var cancel context.CancelFunc - autoScalingNS := new(corev1.Namespace) - 
autoScalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet) + autoscalingNS := new(corev1.Namespace) + autoscalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet) configSecret := new(corev1.Secret) - autoScalingListener := new(actionsv1alpha1.AutoscalingListener) + autoscalingListener := new(actionsv1alpha1.AutoscalingListener) BeforeEach(func() { ctx, cancel = context.WithCancel(context.TODO()) - autoScalingNS = &corev1.Namespace{ + autoscalingNS = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-listener" + RandStringRunes(5)}, } - err := k8sClient.Create(ctx, autoScalingNS) + err := k8sClient.Create(ctx, autoscalingNS) Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") configSecret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "github-config-secret", - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, }, Data: map[string][]byte{ - "github_token": []byte(autoScalingListenerTestGitHubToken), + "github_token": []byte(autoscalingListenerTestGitHubToken), }, } @@ -55,7 +55,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { Expect(err).NotTo(HaveOccurred(), "failed to create config secret") mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, MetricsBindAddress: "0", }) Expect(err).NotTo(HaveOccurred(), "failed to create manager") @@ -70,10 +70,10 @@ var _ = Describe("Test AutoScalingListener controller", func() { min := 1 max := 10 - autoScalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, }, Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", @@ -93,20 +93,20 @@ var _ = Describe("Test AutoScalingListener controller", func() { }, } - err = k8sClient.Create(ctx, 
autoScalingRunnerSet) + err = k8sClient.Create(ctx, autoscalingRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") - autoScalingListener = &actionsv1alpha1.AutoscalingListener{ + autoscalingListener = &actionsv1alpha1.AutoscalingListener{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asl", - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, }, Spec: actionsv1alpha1.AutoscalingListenerSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, RunnerScaleSetId: 1, - AutoscalingRunnerSetNamespace: autoScalingRunnerSet.Namespace, - AutoscalingRunnerSetName: autoScalingRunnerSet.Name, + AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace, + AutoscalingRunnerSetName: autoscalingRunnerSet.Name, EphemeralRunnerSetName: "test-ers", MaxRunners: 10, MinRunners: 1, @@ -114,7 +114,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { }, } - err = k8sClient.Create(ctx, autoScalingListener) + err = k8sClient.Create(ctx, autoscalingListener) Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingListener") go func() { @@ -128,7 +128,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { AfterEach(func() { defer cancel() - err := k8sClient.Delete(ctx, autoScalingNS) + err := k8sClient.Delete(ctx, autoscalingNS) Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") }) @@ -138,7 +138,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { created := new(actionsv1alpha1.AutoscalingListener) Eventually( func() (string, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, created) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created) if err != nil { return "", err } @@ -147,76 +147,76 @@ var _ = Describe("Test AutoScalingListener controller", func() { } 
return created.Finalizers[0], nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer") // Check if secret is created mirrorSecret := new(corev1.Secret) Eventually( func() (string, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoScalingListener), Namespace: autoScalingListener.Namespace}, mirrorSecret) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoscalingListener), Namespace: autoscalingListener.Namespace}, mirrorSecret) if err != nil { return "", err } return string(mirrorSecret.Data["github_token"]), nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListenerTestGitHubToken), "Mirror secret should be created") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerTestGitHubToken), "Mirror secret should be created") // Check if service account is created serviceAccount := new(corev1.ServiceAccount) Eventually( func() (string, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerServiceAccountName(autoScalingListener), Namespace: autoScalingListener.Namespace}, serviceAccount) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, serviceAccount) if err != nil { return "", err } return serviceAccount.Name, nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerServiceAccountName(autoScalingListener)), "Service account should be created") + autoscalingListenerTestTimeout, + 
autoscalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerServiceAccountName(autoscalingListener)), "Service account should be created") // Check if role is created role := new(rbacv1.Role) Eventually( func() ([]rbacv1.PolicyRule, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoScalingListener), Namespace: autoScalingListener.Spec.AutoscalingRunnerSetNamespace}, role) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role) if err != nil { return nil, err } return role.Rules, nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{autoScalingListener.Spec.EphemeralRunnerSetName})), "Role should be created") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{autoscalingListener.Spec.EphemeralRunnerSetName})), "Role should be created") // Check if rolebinding is created roleBinding := new(rbacv1.RoleBinding) Eventually( func() (string, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoScalingListener), Namespace: autoScalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding) if err != nil { return "", err } return roleBinding.RoleRef.Name, nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerRoleName(autoScalingListener)), "Rolebinding should be created") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerRoleName(autoscalingListener)), "Rolebinding should be created") // Check if pod is created pod := new(corev1.Pod) 
Eventually( func() (string, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod) if err != nil { return "", err } return pod.Name, nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created") }) }) @@ -226,25 +226,25 @@ var _ = Describe("Test AutoScalingListener controller", func() { pod := new(corev1.Pod) Eventually( func() (string, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod) if err != nil { return "", err } return pod.Name, nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created") // Delete the AutoScalingListener - err := k8sClient.Delete(ctx, autoScalingListener) + err := k8sClient.Delete(ctx, autoscalingListener) Expect(err).NotTo(HaveOccurred(), "failed to delete test AutoScalingListener") // Cleanup the listener pod Eventually( func() error { podList := new(corev1.PodList) - err := k8sClient.List(ctx, podList, client.InNamespace(autoScalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoScalingListener.Name}) + err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: 
autoscalingListener.Name}) if err != nil { return err } @@ -255,14 +255,14 @@ var _ = Describe("Test AutoScalingListener controller", func() { return nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete pod") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete pod") // Cleanup the listener service account Eventually( func() error { serviceAccountList := new(corev1.ServiceAccountList) - err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoScalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoScalingListener.Name}) + err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name}) if err != nil { return err } @@ -273,14 +273,14 @@ var _ = Describe("Test AutoScalingListener controller", func() { return nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete service account") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete service account") // The AutoScalingListener should be deleted Eventually( func() error { listenerList := new(actionsv1alpha1.AutoscalingListenerList) - err := k8sClient.List(ctx, listenerList, client.InNamespace(autoScalingListener.Namespace), client.MatchingFields{".metadata.name": autoScalingListener.Name}) + err := k8sClient.List(ctx, listenerList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{".metadata.name": autoscalingListener.Name}) if err != nil { return err } @@ -290,8 +290,8 @@ var _ = Describe("Test AutoScalingListener controller", func() { } return nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete AutoScalingListener") + autoscalingListenerTestTimeout, + 
autoscalingListenerTestInterval).ShouldNot(Succeed(), "failed to delete AutoScalingListener") }) }) @@ -301,35 +301,35 @@ var _ = Describe("Test AutoScalingListener controller", func() { pod := new(corev1.Pod) Eventually( func() (string, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod) if err != nil { return "", err } return pod.Name, nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created") // Update the AutoScalingListener - updated := autoScalingListener.DeepCopy() + updated := autoscalingListener.DeepCopy() updated.Spec.EphemeralRunnerSetName = "test-ers-updated" - err := k8sClient.Patch(ctx, updated, client.MergeFrom(autoScalingListener)) + err := k8sClient.Patch(ctx, updated, client.MergeFrom(autoscalingListener)) Expect(err).NotTo(HaveOccurred(), "failed to update test AutoScalingListener") // Check if role is updated with right rules role := new(rbacv1.Role) Eventually( func() ([]rbacv1.PolicyRule, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoScalingListener), Namespace: autoScalingListener.Spec.AutoscalingRunnerSetNamespace}, role) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role) if err != nil { return nil, err } return role.Rules, nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated") + autoscalingListenerTestTimeout, 
+ autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated") }) It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() { @@ -337,19 +337,19 @@ var _ = Describe("Test AutoScalingListener controller", func() { pod := new(corev1.Pod) Eventually( func() (string, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod) if err != nil { return "", err } return pod.Name, nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created") // Update the secret updatedSecret := configSecret.DeepCopy() - updatedSecret.Data["github_token"] = []byte(autoScalingListenerTestGitHubToken + "_updated") + updatedSecret.Data["github_token"] = []byte(autoscalingListenerTestGitHubToken + "_updated") err := k8sClient.Update(ctx, updatedSecret) Expect(err).NotTo(HaveOccurred(), "failed to update test secret") @@ -362,21 +362,21 @@ var _ = Describe("Test AutoScalingListener controller", func() { mirrorSecret := new(corev1.Secret) Eventually( func() (map[string][]byte, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoScalingListener), Namespace: autoScalingListener.Namespace}, mirrorSecret) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoscalingListener), Namespace: autoscalingListener.Namespace}, mirrorSecret) if err != nil { return nil, err } return mirrorSecret.Data, nil }, - autoScalingListenerTestTimeout, - 
autoScalingListenerTestInterval).Should(BeEquivalentTo(updatedSecret.Data), "Mirror secret should be updated") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(BeEquivalentTo(updatedSecret.Data), "Mirror secret should be updated") // Check if we re-created a new pod Eventually( func() error { latestPod := new(corev1.Pod) - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, latestPod) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, latestPod) if err != nil { return err } @@ -386,8 +386,8 @@ var _ = Describe("Test AutoScalingListener controller", func() { return nil }, - autoScalingListenerTestTimeout, - autoScalingListenerTestInterval).Should(Succeed(), "Pod should be recreated") + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(Succeed(), "Pod should be recreated") }) }) }) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index fd1e5c6f6e..460dc1fc29 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -20,34 +20,34 @@ import ( ) const ( - autoScalingRunnerSetTestTimeout = time.Second * 5 - autoScalingRunnerSetTestInterval = time.Millisecond * 250 - autoScalingRunnerSetTestGitHubToken = "gh_token" + autoscalingRunnerSetTestTimeout = time.Second * 5 + autoscalingRunnerSetTestInterval = time.Millisecond * 250 + autoscalingRunnerSetTestGitHubToken = "gh_token" ) var _ = Describe("Test AutoScalingRunnerSet controller", func() { var ctx context.Context var cancel context.CancelFunc - autoScalingNS := new(corev1.Namespace) - autoScalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet) + autoscalingNS := new(corev1.Namespace) + autoscalingRunnerSet := 
new(actionsv1alpha1.AutoscalingRunnerSet) configSecret := new(corev1.Secret) BeforeEach(func() { ctx, cancel = context.WithCancel(context.TODO()) - autoScalingNS = &corev1.Namespace{ + autoscalingNS = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)}, } - err := k8sClient.Create(ctx, autoScalingNS) + err := k8sClient.Create(ctx, autoscalingNS) Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") configSecret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "github-config-secret", - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, }, Data: map[string][]byte{ - "github_token": []byte(autoScalingRunnerSetTestGitHubToken), + "github_token": []byte(autoscalingRunnerSetTestGitHubToken), }, } @@ -55,7 +55,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { Expect(err).NotTo(HaveOccurred(), "failed to create config secret") mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, MetricsBindAddress: "0", }) Expect(err).NotTo(HaveOccurred(), "failed to create manager") @@ -64,7 +64,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Log: logf.Log, - ControllerNamespace: autoScalingNS.Name, + ControllerNamespace: autoscalingNS.Name, DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", ActionsClient: fake.NewMultiClient(), } @@ -73,10 +73,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { min := 1 max := 10 - autoScalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, }, Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", @@ -96,7 +96,7 @@ var _ = Describe("Test 
AutoScalingRunnerSet controller", func() { }, } - err = k8sClient.Create(ctx, autoScalingRunnerSet) + err = k8sClient.Create(ctx, autoscalingRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") go func() { @@ -110,7 +110,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { AfterEach(func() { defer cancel() - err := k8sClient.Delete(ctx, autoScalingNS) + err := k8sClient.Delete(ctx, autoscalingNS) Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") }) @@ -120,7 +120,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { created := new(actionsv1alpha1.AutoscalingRunnerSet) Eventually( func() (string, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, created) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created) if err != nil { return "", err } @@ -129,13 +129,13 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { } return created.Finalizers[0], nil }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer") // Check if runner scale set is created on service Eventually( func() (string, error) { - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, created) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created) if err != nil { return "", err } @@ -146,34 +146,34 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { return 
created.Annotations[runnerScaleSetIdKey], nil }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(BeEquivalentTo("1"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("1"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation") // Check if ephemeral runner set is created Eventually( func() (int, error) { runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) - err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) if err != nil { return 0, err } return len(runnerSetList.Items), nil }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created") // Check if listener is created Eventually( func() error { - return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") // Check if status is updated runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) - err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + err := 
k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") Expect(len(runnerSetList.Items)).To(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created") runnerSet := runnerSetList.Items[0] @@ -185,14 +185,14 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { Eventually( func() (int, error) { updated := new(actionsv1alpha1.AutoscalingRunnerSet) - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, updated) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated) if err != nil { return 0, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err) } return updated.Status.CurrentRunners, nil }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(100), "AutoScalingRunnerSet status should be updated") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(100), "AutoScalingRunnerSet status should be updated") }) }) @@ -201,33 +201,33 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // Wait till the listener is created Eventually( func() error { - return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") // Delete the AutoScalingRunnerSet - err := k8sClient.Delete(ctx, 
autoScalingRunnerSet) + err := k8sClient.Delete(ctx, autoscalingRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to delete AutoScalingRunnerSet") // Check if the listener is deleted Eventually( func() error { - err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) if err != nil && errors.IsNotFound(err) { return nil } return fmt.Errorf("listener is not deleted") }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be deleted") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(Succeed(), "Listener should be deleted") // Check if all the EphemeralRunnerSet is deleted Eventually( func() error { runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) - err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) if err != nil { return err } @@ -238,21 +238,21 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { return nil }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(Succeed(), "All EphemeralRunnerSet should be deleted") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(Succeed(), "All EphemeralRunnerSet should be deleted") // Check if the AutoScalingRunnerSet is deleted Eventually( func() error { - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingRunnerSet)) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: 
autoscalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingRunnerSet)) if err != nil && errors.IsNotFound(err) { return nil } return fmt.Errorf("AutoScalingRunnerSet is not deleted") }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(Succeed(), "AutoScalingRunnerSet should be deleted") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(Succeed(), "AutoScalingRunnerSet should be deleted") }) }) @@ -262,30 +262,30 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { listener := new(actionsv1alpha1.AutoscalingListener) Eventually( func() error { - return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener) + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener) }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) - err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet") runnerSet := runnerSetList.Items[0] // Update the AutoScalingRunnerSet.Spec.Template // This should trigger re-creation of EphemeralRunnerSet and Listener - patched := autoScalingRunnerSet.DeepCopy() + patched := autoscalingRunnerSet.DeepCopy() patched.Spec.Template.Spec.PriorityClassName = "test-priority-class" - err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoScalingRunnerSet)) + err = k8sClient.Patch(ctx, 
patched, client.MergeFrom(autoscalingRunnerSet)) Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") - autoScalingRunnerSet = patched.DeepCopy() + autoscalingRunnerSet = patched.DeepCopy() // We should create a new EphemeralRunnerSet and delete the old one, eventually, we will have only one EphemeralRunnerSet Eventually( func() (string, error) { runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) - err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) if err != nil { return "", err } @@ -296,46 +296,46 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { return runnerSetList.Items[0].Labels[LabelKeyRunnerSpecHash], nil }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created") // We should create a new listener Eventually( func() (string, error) { listener := new(actionsv1alpha1.AutoscalingListener) - err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener) if err != nil { return "", err } return listener.Spec.EphemeralRunnerSetName, nil }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Name), "New Listener should be created") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Name), "New Listener should be created") // Only update the 
Spec for the AutoScalingListener // This should trigger re-creation of the Listener only runnerSetList = new(actionsv1alpha1.EphemeralRunnerSetList) - err = k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + err = k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet") runnerSet = runnerSetList.Items[0] listener = new(actionsv1alpha1.AutoscalingListener) - err = k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener) + err = k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener) Expect(err).NotTo(HaveOccurred(), "failed to get Listener") - patched = autoScalingRunnerSet.DeepCopy() + patched = autoscalingRunnerSet.DeepCopy() min := 10 patched.Spec.MinRunners = &min - err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoScalingRunnerSet)) + err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") // We should not re-create a new EphemeralRunnerSet Consistently( func() (string, error) { runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) - err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace)) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) if err != nil { return "", err } @@ -346,22 +346,22 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { return string(runnerSetList.Items[0].UID), nil }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(string(runnerSet.UID)), "New EphemeralRunnerSet should not be created") + 
autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(string(runnerSet.UID)), "New EphemeralRunnerSet should not be created") // We should only re-create a new listener Eventually( func() (string, error) { listener := new(actionsv1alpha1.AutoscalingListener) - err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener) if err != nil { return "", err } return string(listener.UID), nil }, - autoScalingRunnerSetTestTimeout, - autoScalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(string(listener.UID)), "New Listener should be created") + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(string(listener.UID)), "New Listener should be created") }) }) }) diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go index 7f006454d7..749b8b2cf9 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go @@ -78,16 +78,14 @@ func newExampleRunner(name, namespace, configSecretName string) *v1alpha1.Epheme }, }, } - } var _ = Describe("EphemeralRunner", func() { - Describe("Resource manipulation", func() { var ctx context.Context var cancel context.CancelFunc - autoScalingNS := new(corev1.Namespace) + autoscalingNS := new(corev1.Namespace) configSecret := new(corev1.Secret) controller := new(EphemeralRunnerReconciler) @@ -95,18 +93,18 @@ var _ = Describe("EphemeralRunner", func() { BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) - autoScalingNS = &corev1.Namespace{ + autoscalingNS = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: 
"testns-autoscaling-runner" + RandStringRunes(5), }, } - err := k8sClient.Create(ctx, autoScalingNS) + err := k8sClient.Create(ctx, autoscalingNS) Expect(err).To(BeNil(), "failed to create test namespace for EphemeralRunner") configSecret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "github-config-secret", - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, }, Data: map[string][]byte{ "github_token": []byte(gh_token), @@ -117,7 +115,7 @@ var _ = Describe("EphemeralRunner", func() { Expect(err).To(BeNil(), "failed to create config secret") mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, MetricsBindAddress: "0", }) Expect(err).To(BeNil(), "failed to create manager") @@ -132,7 +130,7 @@ var _ = Describe("EphemeralRunner", func() { err = controller.SetupWithManager(mgr) Expect(err).To(BeNil(), "failed to setup controller") - ephemeralRunner = newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name) + ephemeralRunner = newExampleRunner("test-runner", autoscalingNS.Name, configSecret.Name) err = k8sClient.Create(ctx, ephemeralRunner) Expect(err).To(BeNil(), "failed to create ephemeral runner") @@ -147,7 +145,7 @@ var _ = Describe("EphemeralRunner", func() { AfterEach(func() { defer cancel() - err := k8sClient.Delete(ctx, autoScalingNS) + err := k8sClient.Delete(ctx, autoscalingNS) Expect(err).To(BeNil(), "failed to delete test namespace for EphemeralRunner") }) @@ -357,7 +355,6 @@ var _ = Describe("EphemeralRunner", func() { timeout, interval, ).Should(BeEquivalentTo(true)) - }) It("It should eventually have runner id set", func() { @@ -558,7 +555,6 @@ var _ = Describe("EphemeralRunner", func() { timeout, interval, ).Should(BeEquivalentTo(true)) - }) It("It should re-create pod on exit status 0, but runner exists within the service", func() { @@ -668,25 +664,25 @@ var _ = Describe("EphemeralRunner", func() { var ctx context.Context var cancel context.CancelFunc - 
autoScalingNS := new(corev1.Namespace) + autoscalingNS := new(corev1.Namespace) configSecret := new(corev1.Secret) var mgr manager.Manager BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) - autoScalingNS = &corev1.Namespace{ + autoscalingNS = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: "testns-autoscaling-runner" + RandStringRunes(5), }, } - err := k8sClient.Create(ctx, autoScalingNS) + err := k8sClient.Create(ctx, autoscalingNS) Expect(err).To(BeNil(), "failed to create test namespace for EphemeralRunner") configSecret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "github-config-secret", - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, }, Data: map[string][]byte{ "github_token": []byte(gh_token), @@ -697,17 +693,16 @@ var _ = Describe("EphemeralRunner", func() { Expect(err).To(BeNil(), "failed to create config secret") mgr, err = ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, MetricsBindAddress: "0", }) Expect(err).To(BeNil(), "failed to create manager") - }) AfterEach(func() { defer cancel() - err := k8sClient.Delete(ctx, autoScalingNS) + err := k8sClient.Delete(ctx, autoscalingNS) Expect(err).To(BeNil(), "failed to delete test namespace for EphemeralRunner") }) @@ -742,7 +737,7 @@ var _ = Describe("EphemeralRunner", func() { Expect(err).To(BeNil(), "failed to start manager") }() - ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name) + ephemeralRunner := newExampleRunner("test-runner", autoscalingNS.Name, configSecret.Name) err = k8sClient.Create(ctx, ephemeralRunner) Expect(err).To(BeNil()) diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index 817e846f33..095b0ae351 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ 
b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -28,23 +28,23 @@ const ( var _ = Describe("Test EphemeralRunnerSet controller", func() { var ctx context.Context var cancel context.CancelFunc - autoScalingNS := new(corev1.Namespace) + autoscalingNS := new(corev1.Namespace) ephemeralRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) configSecret := new(corev1.Secret) BeforeEach(func() { ctx, cancel = context.WithCancel(context.TODO()) - autoScalingNS = &corev1.Namespace{ + autoscalingNS = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-runnerset" + RandStringRunes(5)}, } - err := k8sClient.Create(ctx, autoScalingNS) + err := k8sClient.Create(ctx, autoscalingNS) Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for EphemeralRunnerSet") configSecret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "github-config-secret", - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, }, Data: map[string][]byte{ "github_token": []byte(ephemeralRunnerSetTestGitHubToken), @@ -55,7 +55,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { Expect(err).NotTo(HaveOccurred(), "failed to create config secret") mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, MetricsBindAddress: "0", }) Expect(err).NotTo(HaveOccurred(), "failed to create manager") @@ -72,7 +72,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", - Namespace: autoScalingNS.Name, + Namespace: autoscalingNS.Name, }, Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ @@ -107,7 +107,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { AfterEach(func() { defer cancel() - err := k8sClient.Delete(ctx, autoScalingNS) + err := k8sClient.Delete(ctx, autoscalingNS) 
Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for EphemeralRunnerSet") }) diff --git a/go.mod b/go.mod index e49696f690..5dabdd7218 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 + github.com/gruntwork-io/terratest v0.41.9 github.com/hashicorp/go-retryablehttp v0.7.1 github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 @@ -37,15 +38,21 @@ require ( cloud.google.com/go/compute/metadata v0.2.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/aws/aws-sdk-go v1.40.56 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/emicklei/go-restful/v3 v3.8.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect github.com/go-logr/zapr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.19.5 // indirect github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-sql-driver/mysql v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect @@ -53,22 +60,32 @@ require ( github.com/google/go-github/v45 v45.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.1.0 // indirect + github.com/gruntwork-io/go-commons v0.8.0 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect 
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.0 // indirect github.com/imdario/mergo v0.3.12 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.6 // indirect + github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pquerna/otp v1.2.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.4.0 // indirect + github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect golang.org/x/crypto v0.0.0-20220824171710-5757bc0c5503 // indirect golang.org/x/net v0.5.0 // indirect diff --git a/go.sum b/go.sum index fb48b8cd49..0a9ffec77c 100644 --- a/go.sum +++ b/go.sum @@ -48,12 +48,18 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= 
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/aws/aws-sdk-go v1.40.56 h1:FM2yjR0UUYFzDTMx+mH9Vyw1k1EUUxsAFzk+BjkzANA= +github.com/aws/aws-sdk-go v1.40.56/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 h1:5+NghM1Zred9Z078QEZtm28G/kfDfZN/92gkDlLwGVA= github.com/bradleyfalzon/ghinstallation/v2 v2.1.0/go.mod h1:Xg3xPRN5Mcq6GDqeUVhFbjEWMb4JHCyWEeeBGEYQoTU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -65,11 +71,15 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -81,10 +91,16 @@ github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/ghodss/yaml v1.0.0 
h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU= +github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -111,6 +127,8 @@ github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -190,11 +208,20 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod 
h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro= +github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78= +github.com/gruntwork-io/terratest v0.41.9 h1:jyygu23iLcEFjGQhlvRx4R0EJVqOoriP+Ire4U9cZA0= +github.com/gruntwork-io/terratest v0.41.9/go.mod h1:qH1xkPTTGx30XkMHw8jAVIbzqheSjIa5IyiTwSV2vKI= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -204,6 +231,10 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -233,9 +264,19 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= +github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg= +github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod 
h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -267,6 +308,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok= +github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -293,6 +336,10 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 
v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -315,6 +362,8 @@ github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PK github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/teambition/rrule-go v1.8.0 h1:a/IX5s56hGkFF+nRlJUooZU/45OTeeldBGL29nDKIHw= github.com/teambition/rrule-go v1.8.0/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -407,6 +456,7 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= @@ -435,6 +485,7 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -446,6 +497,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From 
24c02c927a9196303070774701405a552632ffc9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Jan 2023 17:36:05 -0500 Subject: [PATCH 040/561] chore(deps): bump github.com/hashicorp/go-retryablehttp from 0.7.1 to 0.7.2 (#2203) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 5dabdd7218..1f7270245c 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 github.com/gruntwork-io/terratest v0.41.9 - github.com/hashicorp/go-retryablehttp v0.7.1 + github.com/hashicorp/go-retryablehttp v0.7.2 github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.25.0 diff --git a/go.sum b/go.sum index 0a9ffec77c..c71112be30 100644 --- a/go.sum +++ b/go.sum @@ -215,15 +215,14 @@ github.com/gruntwork-io/terratest v0.41.9 h1:jyygu23iLcEFjGQhlvRx4R0EJVqOoriP+Ir github.com/gruntwork-io/terratest v0.41.9/go.mod h1:qH1xkPTTGx30XkMHw8jAVIbzqheSjIa5IyiTwSV2vKI= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror 
v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= -github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= +github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= From f988a8c001c15e47f6b88866092d8ddc99e1d05a Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Mon, 23 Jan 2023 22:36:57 +0000 Subject: [PATCH 041/561] Add support for custom CA in actions.Client (#2199) --- .gitignore | 1 + github/actions/client.go | 43 ++++++- github/actions/client_tls_test.go | 149 +++++++++++++++++++++++ github/actions/multi_client.go | 17 +++ github/actions/testdata/intermediate.pem | 73 +++++++++++ github/actions/testdata/leaf.key | 27 ++++ github/actions/testdata/leaf.pem | 81 ++++++++++++ github/actions/testdata/rootCA.crt | 18 +++ github/actions/testdata/server.crt | 22 ++++ github/actions/testdata/server.key | 27 ++++ 10 files changed, 455 insertions(+), 3 deletions(-) create mode 100644 github/actions/client_tls_test.go create mode 100644 github/actions/testdata/intermediate.pem create mode 100644 github/actions/testdata/leaf.key create mode 100644 github/actions/testdata/leaf.pem create mode 100644 github/actions/testdata/rootCA.crt create mode 100644 github/actions/testdata/server.crt create mode 100644 github/actions/testdata/server.key diff --git a/.gitignore b/.gitignore index ef96cc88ce..0e4e30b7a8 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ bin .env .test.env *.pem +!github/actions/testdata/*.pem # 
OS .DS_STORE diff --git a/github/actions/client.go b/github/actions/client.go index f52d2b9542..fe16433072 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -3,6 +3,8 @@ package actions import ( "bytes" "context" + "crypto/tls" + "crypto/x509" "encoding/base64" "encoding/json" "fmt" @@ -70,6 +72,9 @@ type Client struct { githubConfigURL string logger logr.Logger userAgent string + + rootCAs *x509.CertPool + tlsInsecureSkipVerify bool } type ClientOption func(*Client) @@ -98,6 +103,18 @@ func WithRetryWaitMax(retryWaitMax time.Duration) ClientOption { } } +func WithRootCAs(rootCAs *x509.CertPool) ClientOption { + return func(c *Client) { + c.rootCAs = rootCAs + } +} + +func WithoutTLSVerify() ClientOption { + return func(c *Client) { + c.tlsInsecureSkipVerify = true + } +} + func NewClient(ctx context.Context, githubConfigURL string, creds *ActionsAuth, options ...ClientOption) (ActionsService, error) { ac := &Client{ creds: creds, @@ -121,6 +138,26 @@ func NewClient(ctx context.Context, githubConfigURL string, creds *ActionsAuth, retryClient.RetryMax = ac.retryMax retryClient.RetryWaitMax = ac.retryWaitMax + + transport, ok := retryClient.HTTPClient.Transport.(*http.Transport) + if !ok { + // this should always be true, because retryablehttp.NewClient() uses + // cleanhttp.DefaultPooledTransport() + return nil, fmt.Errorf("failed to get http transport from retryablehttp client") + } + if transport.TLSClientConfig == nil { + transport.TLSClientConfig = &tls.Config{} + } + + if ac.rootCAs != nil { + transport.TLSClientConfig.RootCAs = ac.rootCAs + } + + if ac.tlsInsecureSkipVerify { + transport.TLSClientConfig.InsecureSkipVerify = true + } + + retryClient.HTTPClient.Transport = transport ac.Client = retryClient.StandardClient() rt, err := ac.getRunnerRegistrationToken(ctx, githubConfigURL, *creds) @@ -776,7 +813,7 @@ func (c *Client) getRunnerRegistrationToken(ctx context.Context, githubConfigUrl c.logger.Info("getting runner registration 
token", "registrationTokenURL", registrationTokenURL) - resp, err := http.DefaultClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -832,7 +869,7 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c c.logger.Info("getting access token for GitHub App auth", "accessTokenURL", accessTokenURL.String()) - resp, err := http.DefaultClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } @@ -892,7 +929,7 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis c.logger.Info("getting Actions tenant URL and JWT", "registrationURL", registrationURL.String()) - resp, err := http.DefaultClient.Do(req) + resp, err := c.Do(req) if err != nil { return nil, err } diff --git a/github/actions/client_tls_test.go b/github/actions/client_tls_test.go new file mode 100644 index 0000000000..320798b8b3 --- /dev/null +++ b/github/actions/client_tls_test.go @@ -0,0 +1,149 @@ +package actions_test + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/golang-jwt/jwt/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestServerWithSelfSignedCertificates(t *testing.T) { + ctx := context.Background() + + // this handler is a very very barebones replica of actions api + // used during the creation of a a new client + h := func(w http.ResponseWriter, r *http.Request) { + // handle get registration token + if strings.HasSuffix(r.URL.Path, "/runners/registration-token") { + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"token":"token"}`)) + return + } + + // handle getActionsServiceAdminConnection + if strings.HasSuffix(r.URL.Path, "/actions/runner-registration") { + claims := &jwt.RegisteredClaims{ + IssuedAt: jwt.NewNumericDate(time.Now().Add(-1 * 
time.Minute)), + ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Minute)), + Issuer: "123", + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) + privateKey, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(samplePrivateKey)) + require.NoError(t, err) + tokenString, err := token.SignedString(privateKey) + require.NoError(t, err) + w.Write([]byte(`{"url":"TODO","token":"` + tokenString + `"}`)) + return + } + } + + certPath := filepath.Join("testdata", "server.crt") + keyPath := filepath.Join("testdata", "server.key") + + t.Run("client without ca certs", func(t *testing.T) { + server := startNewTLSTestServer(t, certPath, keyPath, http.HandlerFunc(h)) + configURL := server.URL + "/my-org" + + auth := &actions.ActionsAuth{ + Token: "token", + } + client, err := actions.NewClient(ctx, configURL, auth) + assert.Nil(t, client) + require.NotNil(t, err) + + if runtime.GOOS == "linux" { + assert.True(t, errors.As(err, &x509.UnknownAuthorityError{})) + } + + // on macOS we only get an untyped error from the system verifying the + // certificate + if runtime.GOOS == "darwin" { + assert.True(t, strings.HasSuffix(err.Error(), "certificate is not trusted")) + } + }) + + t.Run("client with ca certs", func(t *testing.T) { + server := startNewTLSTestServer(t, certPath, keyPath, http.HandlerFunc(h)) + configURL := server.URL + "/my-org" + + auth := &actions.ActionsAuth{ + Token: "token", + } + + cert, err := os.ReadFile(filepath.Join("testdata", "rootCA.crt")) + require.NoError(t, err) + + pool, err := actions.RootCAsFromConfigMap(map[string][]byte{"cert": cert}) + require.NoError(t, err) + + client, err := actions.NewClient(ctx, configURL, auth, actions.WithRootCAs(pool)) + require.NoError(t, err) + assert.NotNil(t, client) + }) + + t.Run("client with ca chain certs", func(t *testing.T) { + server := startNewTLSTestServer( + t, + filepath.Join("testdata", "leaf.pem"), + filepath.Join("testdata", "leaf.key"), + http.HandlerFunc(h), + ) + configURL := server.URL + 
"/my-org" + + auth := &actions.ActionsAuth{ + Token: "token", + } + + cert, err := os.ReadFile(filepath.Join("testdata", "intermediate.pem")) + require.NoError(t, err) + + pool, err := actions.RootCAsFromConfigMap(map[string][]byte{"cert": cert}) + require.NoError(t, err) + + client, err := actions.NewClient(ctx, configURL, auth, actions.WithRootCAs(pool), actions.WithRetryMax(0)) + require.NoError(t, err) + assert.NotNil(t, client) + }) + + t.Run("client skipping tls verification", func(t *testing.T) { + server := startNewTLSTestServer(t, certPath, keyPath, http.HandlerFunc(h)) + configURL := server.URL + "/my-org" + + auth := &actions.ActionsAuth{ + Token: "token", + } + + client, err := actions.NewClient(ctx, configURL, auth, actions.WithoutTLSVerify()) + require.NoError(t, err) + assert.NotNil(t, client) + }) +} + +func startNewTLSTestServer(t *testing.T, certPath, keyPath string, handler http.Handler) *httptest.Server { + server := httptest.NewUnstartedServer(handler) + t.Cleanup(func() { + server.Close() + }) + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + require.NoError(t, err) + + server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} + server.StartTLS() + + return server +} diff --git a/github/actions/multi_client.go b/github/actions/multi_client.go index c7a53b74bc..85e0fa7591 100644 --- a/github/actions/multi_client.go +++ b/github/actions/multi_client.go @@ -2,6 +2,7 @@ package actions import ( "context" + "crypto/x509" "fmt" "net/url" "strconv" @@ -168,3 +169,19 @@ func (m *multiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, auth.AppCreds = &GitHubAppAuth{AppID: parsedAppID, AppInstallationID: parsedAppInstallationID, AppPrivateKey: appPrivateKey} return m.GetClientFor(ctx, githubConfigURL, auth, namespace) } + +func RootCAsFromConfigMap(configMapData map[string][]byte) (*x509.CertPool, error) { + caCertPool, err := x509.SystemCertPool() + if err != nil { + caCertPool = x509.NewCertPool() + } + + for key, certData 
:= range configMapData { + ok := caCertPool.AppendCertsFromPEM(certData) + if !ok { + return nil, fmt.Errorf("no certificates successfully parsed from key %s", key) + } + } + + return caCertPool, nil +} diff --git a/github/actions/testdata/intermediate.pem b/github/actions/testdata/intermediate.pem new file mode 100644 index 0000000000..527f5c2b70 --- /dev/null +++ b/github/actions/testdata/intermediate.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 8 (0x8) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, O=arc-test, CN=localhost + Validity + Not Before: Jan 23 17:54:51 2023 GMT + Not After : Jun 9 17:54:51 2050 GMT + Subject: C=US, O=arc-test, CN=localhost + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public-Key: (2048 bit) + Modulus: + 00:dd:61:59:0a:19:19:1a:d4:e1:f1:c0:8d:bb:c2: + f8:32:e5:04:55:c5:ea:f6:71:5c:d3:ad:d0:b1:c3: + 86:73:ba:f1:01:7f:5d:45:6c:bf:0d:e6:27:c4:f0: + a0:f2:be:73:61:04:1f:f5:ca:3b:9d:11:c6:00:ae: + 49:6f:7f:9c:f7:e1:21:e4:53:aa:29:71:58:fe:e8: + c8:6c:25:2f:0a:ef:8f:be:e8:1c:9d:76:05:4a:28: + e1:88:20:4b:4a:51:59:48:3c:84:05:ec:10:ae:be: + 76:05:ee:ff:bf:54:67:02:e6:01:e8:02:b4:d0:07: + 79:39:10:71:e6:b1:25:b5:6a:24:7c:22:ef:70:90: + 5b:32:69:81:9d:34:82:a6:3b:fd:b5:8e:6b:8d:12: + e7:bd:0a:0d:61:1f:ed:16:82:30:f9:2c:93:8d:fe: + 70:b5:4d:c4:53:0b:5e:f1:ba:4a:c5:08:ba:56:8f: + dd:b7:fc:13:cd:1b:d1:1c:31:00:d1:7d:49:fd:54: + 4d:73:e8:73:1d:69:dd:98:53:fe:77:66:3f:05:a7: + 61:1c:e4:c2:a6:b9:31:df:c5:0b:b5:78:fc:7f:42: + 9f:0e:a6:1a:eb:59:46:be:ac:95:8a:85:ea:05:e4: + 8a:33:00:2e:8e:d9:a4:20:4a:39:77:53:16:7c:8a: + 9c:59 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: critical + CA:TRUE + X509v3 Key Usage: critical + Certificate Sign + Signature Algorithm: sha256WithRSAEncryption + a5:5c:2f:be:b7:e4:a8:e7:95:7a:58:93:be:5e:3a:5a:f7:0b: + 70:ba:8e:b8:a8:dc:7c:5b:2c:c1:5b:80:f3:8f:8a:c4:2b:d2: + 
ad:69:21:29:75:3a:5b:7d:bb:4f:2b:f9:27:4a:ab:d7:bd:05: + 0a:aa:50:e7:b0:2d:7f:05:2d:42:af:c1:de:aa:a1:69:b1:b4: + 78:ce:f2:78:98:97:49:c0:be:1b:5f:23:47:8d:c5:e8:c4:85: + 84:31:d0:5c:9b:12:96:43:08:ae:32:dc:9d:d4:ad:c6:6d:15: + ad:0f:6c:ec:50:61:86:3c:b7:75:90:6b:44:d5:dd:56:c1:11: + fe:6e:07:80:85:93:8a:34:da:e9:38:21:ac:ce:73:ce:c1:26: + 4e:94:2f:9b:82:b5:06:7a:ef:21:3a:80:79:89:c2:fd:e5:04: + 25:1c:a8:b2:28:91:1f:a1:91:b6:82:ea:ce:64:21:ef:da:0c: + af:bf:09:5a:e2:9f:5b:f6:0f:bf:cf:91:d3:97:7f:f1:25:9b: + 8b:5f:10:16:fb:a8:92:11:13:38:cb:32:02:03:69:6f:9e:fe: + 2a:b0:56:c7:49:f3:2a:9b:c6:ee:a2:98:25:d2:a0:c0:f3:c4: + 03:99:e1:94:e3:f5:95:28:07:ec:db:31:3a:25:79:c1:45:c8: + 8a:1e:75:39 +-----BEGIN CERTIFICATE----- +MIIDCDCCAfCgAwIBAgIBCDANBgkqhkiG9w0BAQsFADA0MQswCQYDVQQGEwJVUzER +MA8GA1UECgwIYXJjLXRlc3QxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0yMzAxMjMx +NzU0NTFaGA8yMDUwMDYwOTE3NTQ1MVowNDELMAkGA1UEBhMCVVMxETAPBgNVBAoM +CGFyYy10ZXN0MRIwEAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQDdYVkKGRka1OHxwI27wvgy5QRVxer2cVzTrdCxw4ZzuvEB +f11FbL8N5ifE8KDyvnNhBB/1yjudEcYArklvf5z34SHkU6opcVj+6MhsJS8K74++ +6ByddgVKKOGIIEtKUVlIPIQF7BCuvnYF7v+/VGcC5gHoArTQB3k5EHHmsSW1aiR8 +Iu9wkFsyaYGdNIKmO/21jmuNEue9Cg1hH+0WgjD5LJON/nC1TcRTC17xukrFCLpW +j923/BPNG9EcMQDRfUn9VE1z6HMdad2YU/53Zj8Fp2Ec5MKmuTHfxQu1ePx/Qp8O +phrrWUa+rJWKheoF5IozAC6O2aQgSjl3UxZ8ipxZAgMBAAGjIzAhMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgIEMA0GCSqGSIb3DQEBCwUAA4IBAQClXC++ +t+So55V6WJO+Xjpa9wtwuo64qNx8WyzBW4Dzj4rEK9KtaSEpdTpbfbtPK/knSqvX +vQUKqlDnsC1/BS1Cr8HeqqFpsbR4zvJ4mJdJwL4bXyNHjcXoxIWEMdBcmxKWQwiu +Mtyd1K3GbRWtD2zsUGGGPLd1kGtE1d1WwRH+bgeAhZOKNNrpOCGsznPOwSZOlC+b +grUGeu8hOoB5icL95QQlHKiyKJEfoZG2gurOZCHv2gyvvwla4p9b9g+/z5HTl3/x +JZuLXxAW+6iSERM4yzICA2lvnv4qsFbHSfMqm8buopgl0qDA88QDmeGU4/WVKAfs +2zE6JXnBRciKHnU5 +-----END CERTIFICATE----- diff --git a/github/actions/testdata/leaf.key b/github/actions/testdata/leaf.key new file mode 100644 index 0000000000..b479990f6e --- /dev/null +++ 
b/github/actions/testdata/leaf.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApgzbb+dY2DKM+Ysrk+l7guhvtgY9q5ws7pqF0duYkI2zmyMW +EDSkXKPoODiimYhol4Cr7c6hgtOzZS0+W4kVdhDLpk/mg9a4ZTLJqn2DIHj9Q0G+ +ENJrENjxPHfykXcXs2LAgRLffle4g4bfnJVQCyzZNiCblpqTnSSyEFa1AEtrxq6r +2E/bYjBm18G4WxBOWHukuYsZ5FKlgzT/ZNeLoME9WDp4+wxKAEGSnEhlPv/Sr6ns +GxPz5i9NPBFqg373oDW17Nxere7M6l6oMqNtFbsQafI7Jmy4rrgHBrDf0s1SlaY3 +ceDPwXpT9ttHXZe9Dqb6MSEEQvL4IWG0TEGMJwIDAQABAoIBADfl8CEVslTlf4uq +C/t5B/kjoieWpkAVDRMttYrV7+AJs8Kv5weBkSsWimASwLoKr5sA19/wRXKzLZsL +xggud6kNMmFEWIddSynWFQltwyy1ThzMDt2+2AgN3/fJMUFC5BmhTsikv9PaV+T/ +DFu77/wzFtQf2gCy/KpT5hWV+sykDBriswFoVycUbC2kAcxcaTJioB5TKmNQoxxe +pUxiQSaEgVYTAuKt5da1UqBmiqoqNUQNIC+Q7T6cGw6F6WPd6sF13VXHF3EU/Q+6 +6bW1p+9iuDxAa6sWI5gLCYFq4vcgTvqa5tGSsrxw7CNmuZFc/HtjMqAcBGDAKwFI +zrTW8sECgYEA2XxoucFX81P+AW6C2ymHCH+4BUdLYANkAE5VLkiTvGN9NCqYmdQD +5jb1yE3dp0YmDvZ/ZGJpwrJHwU7r1FNt1psRpPhIyErzk7MxgF0Efa4dRl3c2rno +aTWgWSTXB1UU3+ev157P3vGNJEewCCgMwdp4qv9dVu+mGaJTZWAKNnECgYEAw3Sj +9bV1c5uaoPvMNAIBVFeSL3W9A6A90uPcPyu55NAsEHtZgAd8JFSYnT2rwbND1CC8 +YqynWfvAEyXshVeBEurRC13XCXhB6U3rfEFnLA5+HZsCgpFNfKHiNvxfVGsGGTRn +YKGYAPOHz1jN8TVT3ZwzKNc5olzVB4KP97ylKBcCgYEApqqHWurG6qsQOaqlzyw4 +1hOCQ1FKew6+INnmvyxRQwp/FW4bOa9XOaIeolzBowHIAql2IMimQdT71jET2sgA +oXh+ggzfQdbmaObm5XbjDSlUN+uQZ3IWoCG/evEXdAAImjnj8Ho81J4JyqbBSM7g +T+KLnIdL0WafxH84J7T8vpECgYBYJZ5cDX3uqVpPB7/MJKtc0jGHXd3kaLv5A/Is +OxgW7RsyQ67VYorGB7DcGRgAv0vzut+60IqYtkSlXhERAamgUm38ZlG4X5e6E/4D +h6tz3wVZbLLxF36OmqNekOqdM7cIXu3QUpAuvaWeCTq3cYllBDC+VnITmzIntOYg +n98L+QKBgF4AQDN4Mcet9RSFVdgK2Ue11ngr39SUUQapsK7uFvRZhv86voeDR3zv +4zaj5JIemaRAOMnJS0pdHBHoz4tcqeDcqqHAdliZ/DYmiFhm8Q6Jufzc0KBkus6p +w8/pSBRpjZQZrgQZxYoU1g9Smy94ysY4DHt5BZIWGbBiwaREARYO +-----END RSA PRIVATE KEY----- diff --git a/github/actions/testdata/leaf.pem b/github/actions/testdata/leaf.pem new file mode 100644 index 0000000000..a87d3be40c --- /dev/null +++ b/github/actions/testdata/leaf.pem @@ -0,0 +1,81 @@ +Certificate: + Data: + Version: 3 (0x2) + 
Serial Number: 11 (0xb) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, O=arc-test, CN=localhost + Validity + Not Before: Jan 23 17:54:52 2023 GMT + Not After : Jun 9 17:54:52 2050 GMT + Subject: C=US, O=actions-runner-controller, OU=actions-runner-controller test + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public-Key: (2048 bit) + Modulus: + 00:a6:0c:db:6f:e7:58:d8:32:8c:f9:8b:2b:93:e9: + 7b:82:e8:6f:b6:06:3d:ab:9c:2c:ee:9a:85:d1:db: + 98:90:8d:b3:9b:23:16:10:34:a4:5c:a3:e8:38:38: + a2:99:88:68:97:80:ab:ed:ce:a1:82:d3:b3:65:2d: + 3e:5b:89:15:76:10:cb:a6:4f:e6:83:d6:b8:65:32: + c9:aa:7d:83:20:78:fd:43:41:be:10:d2:6b:10:d8: + f1:3c:77:f2:91:77:17:b3:62:c0:81:12:df:7e:57: + b8:83:86:df:9c:95:50:0b:2c:d9:36:20:9b:96:9a: + 93:9d:24:b2:10:56:b5:00:4b:6b:c6:ae:ab:d8:4f: + db:62:30:66:d7:c1:b8:5b:10:4e:58:7b:a4:b9:8b: + 19:e4:52:a5:83:34:ff:64:d7:8b:a0:c1:3d:58:3a: + 78:fb:0c:4a:00:41:92:9c:48:65:3e:ff:d2:af:a9: + ec:1b:13:f3:e6:2f:4d:3c:11:6a:83:7e:f7:a0:35: + b5:ec:dc:5e:ad:ee:cc:ea:5e:a8:32:a3:6d:15:bb: + 10:69:f2:3b:26:6c:b8:ae:b8:07:06:b0:df:d2:cd: + 52:95:a6:37:71:e0:cf:c1:7a:53:f6:db:47:5d:97: + bd:0e:a6:fa:31:21:04:42:f2:f8:21:61:b4:4c:41: + 8c:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Authority Key Identifier: + DirName:/C=US/O=arc-test/CN=localhost + serial:08 + + X509v3 Basic Constraints: + CA:FALSE + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment, Data Encipherment + X509v3 Subject Alternative Name: + IP Address:127.0.0.1, DNS:localhost + Signature Algorithm: sha256WithRSAEncryption + 73:70:5c:40:cf:48:a9:c0:8b:50:c8:10:b5:3c:57:18:fd:ac: + 05:6b:7c:8f:ad:b2:cc:2a:92:b8:70:57:19:88:40:b6:b1:d9: + e7:44:7b:44:69:4b:dc:10:20:08:a8:5a:b3:29:3c:ce:42:f8: + 57:04:e4:9b:b6:d8:22:0f:d4:4a:51:76:b8:32:4b:b6:bd:b9: + 10:4a:69:b6:20:f3:77:2b:eb:7b:11:b3:c9:1d:96:a6:0d:9a: + 29:ae:e6:89:91:95:26:29:7a:a9:e9:8f:6e:9c:aa:17:96:e7: + 87:04:84:bb:61:38:a8:d3:f7:2e:ef:ce:49:38:e7:d9:2c:86: 
+ be:a8:63:98:6a:f2:62:4f:48:1a:ee:d0:3f:9c:33:1e:d2:b3: + 3d:3c:bd:ab:4d:a9:c0:02:d2:ae:01:f4:fb:dd:1d:10:82:08: + 26:d2:06:2c:c1:5a:3c:76:c6:85:b8:c4:22:63:7d:c1:40:c5: + 44:bf:ac:b9:6e:58:ac:5b:5e:5f:34:08:a7:08:88:14:10:3f: + 3d:5d:6e:9c:38:d6:9c:2d:45:88:3f:46:10:15:bd:2f:d5:75: + 5f:cc:cb:f3:e7:56:c2:d9:99:7b:a9:ea:a8:b5:ff:60:35:28: + b9:0c:6b:13:0b:d9:e0:d1:89:11:9b:4b:26:ad:2e:5a:93:ea: + 56:00:da:a0 +-----BEGIN CERTIFICATE----- +MIIDiTCCAnGgAwIBAgIBCzANBgkqhkiG9w0BAQsFADA0MQswCQYDVQQGEwJVUzER +MA8GA1UECgwIYXJjLXRlc3QxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0yMzAxMjMx +NzU0NTJaGA8yMDUwMDYwOTE3NTQ1MlowWjELMAkGA1UEBhMCVVMxIjAgBgNVBAoM +GWFjdGlvbnMtcnVubmVyLWNvbnRyb2xsZXIxJzAlBgNVBAsMHmFjdGlvbnMtcnVu +bmVyLWNvbnRyb2xsZXIgdGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAKYM22/nWNgyjPmLK5Ppe4Lob7YGPaucLO6ahdHbmJCNs5sjFhA0pFyj6Dg4 +opmIaJeAq+3OoYLTs2UtPluJFXYQy6ZP5oPWuGUyyap9gyB4/UNBvhDSaxDY8Tx3 +8pF3F7NiwIES335XuIOG35yVUAss2TYgm5aak50kshBWtQBLa8auq9hP22IwZtfB +uFsQTlh7pLmLGeRSpYM0/2TXi6DBPVg6ePsMSgBBkpxIZT7/0q+p7BsT8+YvTTwR +aoN+96A1tezcXq3uzOpeqDKjbRW7EGnyOyZsuK64Bwaw39LNUpWmN3Hgz8F6U/bb +R12XvQ6m+jEhBELy+CFhtExBjCcCAwEAAaN+MHwwRgYDVR0jBD8wPaE4pDYwNDEL +MAkGA1UEBhMCVVMxETAPBgNVBAoMCGFyYy10ZXN0MRIwEAYDVQQDDAlsb2NhbGhv +c3SCAQgwCQYDVR0TBAIwADALBgNVHQ8EBAMCBPAwGgYDVR0RBBMwEYcEfwAAAYIJ +bG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQBzcFxAz0ipwItQyBC1PFcY/awF +a3yPrbLMKpK4cFcZiEC2sdnnRHtEaUvcECAIqFqzKTzOQvhXBOSbttgiD9RKUXa4 +Mku2vbkQSmm2IPN3K+t7EbPJHZamDZopruaJkZUmKXqp6Y9unKoXlueHBIS7YTio +0/cu785JOOfZLIa+qGOYavJiT0ga7tA/nDMe0rM9PL2rTanAAtKuAfT73R0Qgggm +0gYswVo8dsaFuMQiY33BQMVEv6y5blisW15fNAinCIgUED89XW6cONacLUWIP0YQ +Fb0v1XVfzMvz51bC2Zl7qeqotf9gNSi5DGsTC9ng0YkRm0smrS5ak+pWANqg +-----END CERTIFICATE----- diff --git a/github/actions/testdata/rootCA.crt b/github/actions/testdata/rootCA.crt new file mode 100644 index 0000000000..7c9c7bbccf --- /dev/null +++ b/github/actions/testdata/rootCA.crt @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- 
+MIIC6jCCAdICCQCoZFduxPa/eDANBgkqhkiG9w0BAQsFADA2MQswCQYDVQQGEwJV +UzEnMCUGA1UEAwweYWN0aW9ucy1ydW5uZXItY29udHJvbGxlci10ZXN0MCAXDTIz +MDExOTE2NTAwMVoYDzIwNTAwNjA1MTY1MDAxWjA2MQswCQYDVQQGEwJVUzEnMCUG +A1UEAwweYWN0aW9ucy1ydW5uZXItY29udHJvbGxlci10ZXN0MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAykHCU0I/pdzhnQBwr2N+7so66LPq0cxc8JJL +S2mmk7gg+NWhTZzoci6aYXNRKCyH6B2Wmy7Qveku2wqT2+/4JBMYgTWH5bF7yt76 +LB+x9YruSgH/pBN2WI4vRU87NOAU8F0o0U/Lp5vAJoRo+ePPvcHu0OY1WF+QnEX+ +xtp6gJFGf5DT4U9upwEgQjKgvKFEoB5KNeH1qr2fS2yA2vhm6Uhm+1i/KUQUZ49K +GvFK8TQQT4HXft8rPLP5M9OitdqVU8SX0dQoXZ4M41/qydycHOvApj0LlH/XsicZ +x0mkF90hD+9VRqeYFe562NI4NHR7FGP7HKPWibNjXKC2w+z+aQIDAQABMA0GCSqG +SIb3DQEBCwUAA4IBAQBxaOCnmakd1PPp+pH40OjUktKG1nqM2tGqP0o3Bk7huB2y +jXIDi9ETuTeqqHONwwgsKOVY3J+Zt5R+teBSC0qUnypODzu+9v8Xa4Is9G9GyT5S +erjpPcJjQnvZyMHLH9DGGWE9UCyqKIqmaEc9bwr2oz1+a0rsaS3ZdIFlQibBHij5 +tdJcnzXfN4T4GIbYXKMCOYDy/5CiNJ26l/pQNpO9JCzsEmngw0ooS0Bi8EcTCgB6 +dsHl0w8va3l1kvxWWIlNTGwrAEpRbXmL01hAqx2yCiaFPVZ/eRNWmBWO4LpW4ouK +YOaA+X7geM6XVFlZE3cP58AxYKWHGAThxkZbD5cu +-----END CERTIFICATE----- diff --git a/github/actions/testdata/server.crt b/github/actions/testdata/server.crt new file mode 100644 index 0000000000..be71271a69 --- /dev/null +++ b/github/actions/testdata/server.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDnTCCAoWgAwIBAgIJAJskDVhiEY6fMA0GCSqGSIb3DQEBCwUAMDYxCzAJBgNV +BAYTAlVTMScwJQYDVQQDDB5hY3Rpb25zLXJ1bm5lci1jb250cm9sbGVyLXRlc3Qw +HhcNMjMwMTE5MTY1MTE0WhcNMjQwMTE5MTY1MTE0WjBaMQswCQYDVQQGEwJVUzEi +MCAGA1UECgwZYWN0aW9ucy1ydW5uZXItY29udHJvbGxlcjEnMCUGA1UECwweYWN0 +aW9ucy1ydW5uZXItY29udHJvbGxlciB0ZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAzOTt1/VjuaHzn+b7jLeufW3rxLHFKQV+LiUiT389rbFGY+DN +CC+Nzx+DbFBpKcX/scseVhFzlXlrESWWZ4h7LGMXRsTDKs91F1RMuFCd8eIEwbuV +civR44IqT5r/0hlMOWemd3Fh/c8KF+9dWQ0q0T3tvlVzEbWNRTVAXTT4JzizqNd1 +1hhnuV/KjhiptPC/8jQ4D9ocZKM8a1pM9O2z3bnmH7VTQJkhjxE7gefQTPQRmvKk +C7uqvfk2NHTTnKiLfkE10JhLTa0VND2aofNWCybGTyHNNCNlepakoP3KyFC2LjPR 
+oR5iwSnCRDu1z8tDWW+rIa3pfxdQ8LnH4J4CDwIDAQABo4GJMIGGMFAGA1UdIwRJ +MEehOqQ4MDYxCzAJBgNVBAYTAlVTMScwJQYDVQQDDB5hY3Rpb25zLXJ1bm5lci1j +b250cm9sbGVyLXRlc3SCCQCoZFduxPa/eDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIE +8DAaBgNVHREEEzARhwR/AAABgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEB +ALdl0ytjellmhtjbXkUZKAl/R2ZXMAVxIOtb4qiN6OOwOMK4p2Wt26p34bQa2JD0 +t0qvesI7spQzQObNMdT6NZJl8Ul0ABuzti/Esvmby+VfsFPasCQVXx+jqGhERqXc +SeZFIVWVACyfAc1dkqfGwehSrY62eBlY2PJ1JezagW6aLAnV6Si+96++mkALJDdX +MZhhSqjxM+Nnmhpy4My6oHVrdYWHcuVhzlEmNaMtmJCYuihIyD2Usn32xJK1k89d +WgEOPCk+ZDAligPlGZS201fsznJk5uIjmxPjjFlJLXotBs8H7j0cQ2JkV5YHsHCk +EYf5EJ0ZKtZbwRFeRC1Ajxg= +-----END CERTIFICATE----- diff --git a/github/actions/testdata/server.key b/github/actions/testdata/server.key new file mode 100644 index 0000000000..f7011fd4de --- /dev/null +++ b/github/actions/testdata/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAzOTt1/VjuaHzn+b7jLeufW3rxLHFKQV+LiUiT389rbFGY+DN +CC+Nzx+DbFBpKcX/scseVhFzlXlrESWWZ4h7LGMXRsTDKs91F1RMuFCd8eIEwbuV +civR44IqT5r/0hlMOWemd3Fh/c8KF+9dWQ0q0T3tvlVzEbWNRTVAXTT4JzizqNd1 +1hhnuV/KjhiptPC/8jQ4D9ocZKM8a1pM9O2z3bnmH7VTQJkhjxE7gefQTPQRmvKk +C7uqvfk2NHTTnKiLfkE10JhLTa0VND2aofNWCybGTyHNNCNlepakoP3KyFC2LjPR +oR5iwSnCRDu1z8tDWW+rIa3pfxdQ8LnH4J4CDwIDAQABAoIBAC5rr3c+IVntV0Tj +EBrRgrboMIJfxEuG8w+BWkSoj1DK2SfHxqwUGgzTFvNzRGAye7vMSRM24Pj8iUVZ +Pro2MbHcwWlHKvCID/85GiioGyCyFGHQHgu/4c2pr+xZMZxoHtzintRw28KlJaRG +lt+WHB1L6pE0yt04RMlpRyvW1GIODtEh1wya61Aa2xZMJxgbNWv89znLI2f3ForY +QR/he8hQtfJQeH+mv2SvJ1bopkJ58ZObKapuJAWCSxzVRj/yol1MqfUDBy4NrJfY +F5UP0BSmnED1EdIXeC0duo5RyiSfHqqJlcKR+zlepOb4pr4I1H8P6AIJ9iiunxUJ +h9i+YAECgYEA7JgrH5sjStcHdwUgEPN4E1MI3WRwDnnuVpzeSUH7tRqK4lsdIKmF +u/ss3TMwTgC8XR4JJbVp+6ea54zpVjtBGwShMSvn2+e7OHHg1YoVqBIgRpL+/C4m +wfon2EglQ0IjscUtKuAR/UyhU6vZtkYRUKeXRKisW4yoobdob0Y4lakCgYEA3bMl +BfszC5c0RXI5vsBUBvr9gXMY/8tacM7H8i3vT7JqoYJG6GGST0/HeK+La3w2zryx +Q8IL6uLy/HZvTHZ+BSp4KzwwgDUIk0jm/JcvzD2ZhJHoAo4aQTc6QI2ZNgjGVwCb 
+nJ0Niaxc4CdSUEAUHH1bCXk/e2stcnieFuiiPPcCgYAIxrA60OdjPEyzloYU+uMG +XHskszgQ4Wb84X7BWug6VIy4Tsbq0j76tRt57Q8qpY5XKekO9AbFZfcyBaEWKMaG +eQp9p3JHTvY75sV/Rkr9XAbEd2lr805OvbfCpxJyxz5JttWxFHS2X6RQVTyTLVAx +HLZYvqT+FF6g+QuvrPwmWQKBgAQspVvReQqU1EUie3feAzcGbtOLKUNXvuI04orq +1oC3qU5VN6SUgb7Aj87z7zoc4qNN5kCSXMsVbuHWEQ5thL3wKMcXoQoo9Xpgewjy +h9Herw9R9/5kUpY7xfsFL4dW7vUga82tH14iQrVtyBz+t+I5cgdhoxJd2EM5hjCE +PNnNAoGBALPjmvEZ1HJdCOxY/AisziVtOFc6Glk/KhpSIT7WE1me4qLQFmrsHIDQ +kZ8Sb1f3PQ4T4vHGrtl8qh144MJPI1Nb8klzdlD1xeypGpgXoQb5fsC17g1fgczp +TGzq3pvnlGnrgVmnfrWQCHXDLzXtLqM/Pu84guPFftJQ+++yy0np +-----END RSA PRIVATE KEY----- From da019872a0dfdccc24e2c4a4d40a1fe47af5c67e Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Tue, 24 Jan 2023 17:02:23 +0000 Subject: [PATCH 042/561] Fix URL for authenticating using a GitHub app (#2206) Co-authored-by: Nikola Jokic --- github/actions/client.go | 41 +++++++++++++++++++++++++------- github/actions/url_test.go | 48 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 9 deletions(-) create mode 100644 github/actions/url_test.go diff --git a/github/actions/client.go b/github/actions/client.go index fe16433072..fd99dd21ba 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -842,23 +842,17 @@ type accessToken struct { } func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, creds *GitHubAppAuth) (*accessToken, error) { - parsedGitHubConfigURL, err := url.Parse(gitHubConfigURL) - if err != nil { - return nil, err - } - accessTokenJWT, err := createJWTForGitHubApp(creds) if err != nil { return nil, err } - ru := fmt.Sprintf("%v://%v/app/installations/%v/access_tokens", parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host, creds.AppInstallationID) - accessTokenURL, err := url.Parse(ru) + u, err := githubAPIURL(gitHubConfigURL, fmt.Sprintf("/app/installations/%v/access_tokens", creds.AppInstallationID)) if err != nil { return nil, err } - req, err := http.NewRequestWithContext(ctx, 
http.MethodPost, accessTokenURL.String(), nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, nil) if err != nil { return nil, err } @@ -867,7 +861,7 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", accessTokenJWT)) req.Header.Add("User-Agent", c.userAgent) - c.logger.Info("getting access token for GitHub App auth", "accessTokenURL", accessTokenURL.String()) + c.logger.Info("getting access token for GitHub App auth", "accessTokenURL", u) resp, err := c.Do(req) if err != nil { @@ -1090,3 +1084,32 @@ func (c *Client) refreshTokenIfNeeded(ctx context.Context) error { return nil } + +func githubAPIURL(configURL, path string) (string, error) { + u, err := url.Parse(configURL) + if err != nil { + return "", err + } + + result := &url.URL{ + Scheme: u.Scheme, + } + + switch u.Host { + // Hosted + case "github.com", "github.localhost": + result.Host = fmt.Sprintf("api.%s", u.Host) + // re-routing www.github.com to api.github.com + case "www.github.com": + result.Host = "api.github.com" + + // Enterprise + default: + result.Host = u.Host + result.Path = "/api/v3" + } + + result.Path += path + + return result.String(), nil +} diff --git a/github/actions/url_test.go b/github/actions/url_test.go new file mode 100644 index 0000000000..ae296a30e1 --- /dev/null +++ b/github/actions/url_test.go @@ -0,0 +1,48 @@ +package actions + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGithubAPIURL(t *testing.T) { + tests := []struct { + configURL string + path string + expected string + }{ + { + configURL: "https://github.com/org/repo", + path: "/app/installations/123/access_tokens", + expected: "https://api.github.com/app/installations/123/access_tokens", + }, + { + configURL: "https://www.github.com/org/repo", + path: "/app/installations/123/access_tokens", + expected: 
"https://api.github.com/app/installations/123/access_tokens", + }, + { + configURL: "http://github.localhost/org/repo", + path: "/app/installations/123/access_tokens", + expected: "http://api.github.localhost/app/installations/123/access_tokens", + }, + { + configURL: "https://my-instance.com/org/repo", + path: "/app/installations/123/access_tokens", + expected: "https://my-instance.com/api/v3/app/installations/123/access_tokens", + }, + { + configURL: "http://localhost/org/repo", + path: "/app/installations/123/access_tokens", + expected: "http://localhost/api/v3/app/installations/123/access_tokens", + }, + } + + for _, test := range tests { + actual, err := githubAPIURL(test.configURL, test.path) + require.NoError(t, err) + assert.Equal(t, test.expected, actual) + } +} From 7f69f0f59619aa9251c859cb54b3250488998b8b Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Thu, 26 Jan 2023 11:52:21 +0100 Subject: [PATCH 043/561] Fix intermittent image push failures to GHCR (#2214) --- .github/workflows/publish-arc2.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/publish-arc2.yaml b/.github/workflows/publish-arc2.yaml index 8b2307f725..0b87ccd4dc 100644 --- a/.github/workflows/publish-arc2.yaml +++ b/.github/workflows/publish-arc2.yaml @@ -66,7 +66,11 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 with: - version: latest + # Pinning v0.9.1 for Buildx and BuildKit v0.10.6 + # BuildKit v0.11 which has a bug causing intermittent + # failures pushing images to GHCR + version: v0.9.1 + driver-opts: image=moby/buildkit:v0.10.6 - name: Login to GitHub Container Registry uses: docker/login-action@v2 From 0417f2a7925c67473830ff86bf2c68018772cd0c Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Thu, 26 Jan 2023 12:19:52 -0500 Subject: [PATCH 044/561] Return error for non-existing runner group. 
(#2215) --- github/actions/client.go | 2 +- github/actions/client_runner_test.go | 44 ++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/github/actions/client.go b/github/actions/client.go index fd99dd21ba..48ab4a213d 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -300,7 +300,7 @@ func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) ( } if runnerGroupList.Count == 0 { - return nil, nil + return nil, fmt.Errorf("no runner group found with name '%s'", runnerGroup) } if runnerGroupList.Count > 1 { diff --git a/github/actions/client_runner_test.go b/github/actions/client_runner_test.go index 9406425adc..38d7b29825 100644 --- a/github/actions/client_runner_test.go +++ b/github/actions/client_runner_test.go @@ -173,3 +173,47 @@ func TestDeleteRunner(t *testing.T) { assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) }) } + +func TestGetRunnerGroupByName(t *testing.T) { + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + + t.Run("Get RunnerGroup by Name", func(t *testing.T) { + var runnerGroupID int64 = 1 + var runnerGroupName string = "test-runner-group" + want := &actions.RunnerGroup{ + ID: runnerGroupID, + Name: runnerGroupName, + } + response := []byte(`{"count": 1, "value": [{"id": 1, "name": "test-runner-group"}]}`) + + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(response) + })) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + got, err := client.GetRunnerGroupByName(ctx, runnerGroupName) + require.NoError(t, err) + assert.Equal(t, want, got) + }) + + t.Run("Get RunnerGroup by name with not exist runner group", func(t *testing.T) { + var runnerGroupName string = "test-runner-group" + response := []byte(`{"count": 0, "value": []}`) + + server := 
newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(response) + })) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + got, err := client.GetRunnerGroupByName(ctx, runnerGroupName) + assert.ErrorContains(t, err, "no runner group found with name") + assert.Nil(t, got) + }) +} From 5a142d88d66c30aa43e20e93ace21f0a694b6851 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Jan 2023 09:23:28 +0900 Subject: [PATCH 045/561] chore(deps): bump sigs.k8s.io/controller-runtime from 0.13.1 to 0.14.1 (#2132) Signed-off-by: dependabot[bot] Signed-off-by: Yusuke Kuoka Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Yusuke Kuoka --- Makefile | 53 +++++++ ...ions.github.com_autoscalingrunnersets.yaml | 101 ++++++++++++- .../actions.github.com_ephemeralrunners.yaml | 101 ++++++++++++- ...ctions.github.com_ephemeralrunnersets.yaml | 101 ++++++++++++- ...ions.summerwind.dev_runnerdeployments.yaml | 109 +++++++++++++- ...ions.summerwind.dev_runnerreplicasets.yaml | 109 +++++++++++++- .../crds/actions.summerwind.dev_runners.yaml | 109 +++++++++++++- .../actions.summerwind.dev_runnersets.yaml | 142 +++++++++++++++++- ...ions.github.com_autoscalingrunnersets.yaml | 101 ++++++++++++- .../actions.github.com_ephemeralrunners.yaml | 101 ++++++++++++- ...ctions.github.com_ephemeralrunnersets.yaml | 101 ++++++++++++- ...ions.summerwind.dev_runnerdeployments.yaml | 109 +++++++++++++- ...ions.summerwind.dev_runnerreplicasets.yaml | 109 +++++++++++++- .../bases/actions.summerwind.dev_runners.yaml | 109 +++++++++++++- .../actions.summerwind.dev_runnersets.yaml | 142 +++++++++++++++++- .../autoscalinglistener_controller_test.go | 2 +- .../autoscalingrunnerset_controller.go | 2 +- .../autoscalingrunnerset_controller_test.go | 2 +- controllers/actions.github.com/clientutil.go | 10 ++ 
.../ephemeralrunner_controller.go | 10 +- .../ephemeralrunner_controller_test.go | 2 +- .../ephemeralrunnerset_controller.go | 2 +- .../ephemeralrunnerset_controller_test.go | 2 +- controllers/actions.github.com/suite_test.go | 13 +- .../horizontal_runner_autoscaler_webhook.go | 122 +++++++-------- ...rizontal_runner_autoscaler_webhook_test.go | 6 +- .../integration_test.go | 2 +- .../runnerdeployment_controller_test.go | 2 +- .../runnerreplicaset_controller_test.go | 2 +- .../actions.summerwind.net/suite_test.go | 13 +- go.mod | 43 +++--- go.sum | 90 +++++------ 32 files changed, 1678 insertions(+), 244 deletions(-) diff --git a/Makefile b/Makefile index 171915bfef..4627ec322c 100644 --- a/Makefile +++ b/Makefile @@ -113,6 +113,59 @@ manifests-gen-crds: controller-gen yq for YAMLFILE in config/crd/bases/actions*.yaml; do \ $(YQ) '.spec.preserveUnknownFields = false' --inplace "$$YAMLFILE" ; \ done + #runners + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace 
config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.dockerdContainerResources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + #runnerreplicasets + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.dockerdContainerResources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + #runnerdeployments + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.dockerdContainerResources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + #runnersets + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.volumeClaimTemplates.items.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + #autoscalingrunnersets + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml + #ehemeralrunnersets + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + # ephemeralrunners + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml chart-crds: cp config/crd/bases/*.yaml charts/actions-runner-controller/crds/ diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml index 9d60edaa99..9542f522e3 
100644 --- a/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml +++ b/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml @@ -1090,6 +1090,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1902,6 +1914,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2725,6 +2749,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3048,6 +3084,31 @@ spec: - conditionType type: object type: array + resourceClaims: + description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. + type: string + resourceClaimTemplateName: + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. 
\n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string @@ -3057,6 +3118,21 @@ spec: schedulerName: description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. type: string + schedulingGates: + description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. \n This is an alpha-level feature enabled by PodSchedulingReadiness feature gate." + items: + description: PodSchedulingGate is associated to a Pod to guard its scheduling. + properties: + name: + description: Name of the scheduling gate. Each scheduling gate must have a unique name field. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' properties: @@ -3107,7 +3183,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3237,10 +3313,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. 
Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. 
It's a required field. @@ -3540,7 +3616,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3556,7 +3632,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. 
* While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3567,6 +3643,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -3574,6 +3653,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml index 4712d7662f..f321a85af1 100644 --- a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml +++ b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml @@ -1094,6 +1094,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1906,6 +1918,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
\n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2729,6 +2753,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3052,6 +3088,31 @@ spec: - conditionType type: object type: array + resourceClaims: + description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. 
Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. + type: string + resourceClaimTemplateName: + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string @@ -3061,6 +3122,21 @@ spec: schedulerName: description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. 
type: string + schedulingGates: + description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. \n This is an alpha-level feature enabled by PodSchedulingReadiness feature gate." + items: + description: PodSchedulingGate is associated to a Pod to guard its scheduling. + properties: + name: + description: Name of the scheduling gate. Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' properties: @@ -3111,7 +3187,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3241,10 +3317,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. 
Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. 
Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -3544,7 +3620,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. 
@@ -3560,7 +3636,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. 
When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3571,6 +3647,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -3578,6 +3657,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml index 913aee5dc2..d4b2d35130 100644 --- a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml +++ b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml @@ -1076,6 +1076,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1888,6 +1900,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2711,6 +2735,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3034,6 +3070,31 @@ spec: - conditionType type: object type: array + resourceClaims: + description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. + type: string + resourceClaimTemplateName: + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string @@ -3043,6 +3104,21 @@ spec: schedulerName: description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. type: string + schedulingGates: + description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. \n This is an alpha-level feature enabled by PodSchedulingReadiness feature gate." + items: + description: PodSchedulingGate is associated to a Pod to guard its scheduling. + properties: + name: + description: Name of the scheduling gate. Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' properties: @@ -3093,7 +3169,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3223,10 +3299,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. 
All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -3526,7 +3602,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3542,7 +3618,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3553,6 +3629,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string required: - kind - name @@ -3560,6 +3639,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml index 0d73224dff..f3254b9876 100644 --- a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml @@ -1081,6 +1081,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1500,6 +1512,18 @@ spec: dockerdContainerResources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2141,6 +2165,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2963,6 +2999,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3256,6 +3304,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3328,7 +3388,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. 
Note that this field cannot be set when spec.os.name is windows. + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3873,6 +3933,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -4221,10 +4293,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. 
This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. 
Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -4554,7 +4626,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4570,7 +4642,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. 
This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. 
There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4581,6 +4653,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -4588,6 +4663,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -5216,6 +5303,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerreplicasets.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerreplicasets.yaml index b568548a10..a2e6aba749 100644 --- a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerreplicasets.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerreplicasets.yaml @@ -1078,6 +1078,18 @@ spec: resources: description: 'Compute Resources required by this container. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1497,6 +1509,18 @@ spec: dockerdContainerResources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2138,6 +2162,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2960,6 +2996,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3253,6 +3301,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3325,7 +3385,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3870,6 +3930,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -4218,10 +4290,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. 
All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -4551,7 +4623,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4567,7 +4639,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4578,6 +4650,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string required: - kind - name @@ -4585,6 +4660,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -5213,6 +5300,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml index 4129a909c4..beaea51b0a 100644 --- a/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml @@ -1031,6 +1031,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1450,6 +1462,18 @@ spec: dockerdContainerResources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2091,6 +2115,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2913,6 +2949,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3206,6 +3254,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. 
properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3278,7 +3338,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3823,6 +3883,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. 
\n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -4171,10 +4243,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. 
This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -4504,7 +4576,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' 
+ description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4520,7 +4592,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. 
* While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required. @@ -4531,6 +4603,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -4538,6 +4613,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -5166,6 +5253,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
\n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnersets.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnersets.yaml index 326a4e4433..7cdb81561d 100644 --- a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnersets.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnersets.yaml @@ -89,6 +89,14 @@ spec: description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) format: int32 type: integer + ordinals: + description: ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a "0" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha. + properties: + start: + description: 'start is the number representing the first replica''s index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range: [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). If unset, defaults to 0. Replica indices will be in the range: [0, .spec.replicas).' 
+ format: int32 + type: integer + type: object organization: pattern: ^[^/]+$ type: string @@ -152,7 +160,7 @@ spec: description: 'serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where "pod-specific-string" is managed by the StatefulSet controller.' type: string template: - description: template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. + description: template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named "web" with index number "3" would be named "web-3". properties: metadata: description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' @@ -1151,6 +1159,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
+ properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1963,6 +1983,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2786,6 +2818,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3109,6 +3153,31 @@ spec: - conditionType type: object type: array + resourceClaims: + description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. + type: string + resourceClaimTemplateName: + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. 
\n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string @@ -3118,6 +3187,21 @@ spec: schedulerName: description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. type: string + schedulingGates: + description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. \n This is an alpha-level feature enabled by PodSchedulingReadiness feature gate." + items: + description: PodSchedulingGate is associated to a Pod to guard its scheduling. + properties: + name: + description: Name of the scheduling gate. Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' properties: @@ -3168,7 +3252,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. 
+ description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3298,10 +3382,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. 
All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -3601,7 +3685,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' 
+ description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3617,7 +3701,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. 
* While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required. @@ -3628,6 +3712,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -3635,6 +3722,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -4317,7 +4416,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4333,7 +4432,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. 
* While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4344,6 +4443,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -4351,6 +4453,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -4493,6 +4607,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml index 9d60edaa99..9542f522e3 100644 --- a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml +++ b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml @@ -1090,6 +1090,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1902,6 +1914,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2725,6 +2749,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3048,6 +3084,31 @@ spec: - conditionType type: object type: array + resourceClaims: + description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. + type: string + resourceClaimTemplateName: + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. 
\n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string @@ -3057,6 +3118,21 @@ spec: schedulerName: description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. type: string + schedulingGates: + description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. \n This is an alpha-level feature enabled by PodSchedulingReadiness feature gate." + items: + description: PodSchedulingGate is associated to a Pod to guard its scheduling. + properties: + name: + description: Name of the scheduling gate. Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' properties: @@ -3107,7 +3183,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. 
+ description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3237,10 +3313,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. 
All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -3540,7 +3616,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' 
+ description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3556,7 +3632,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. 
* While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required. @@ -3567,6 +3643,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -3574,6 +3653,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/config/crd/bases/actions.github.com_ephemeralrunners.yaml b/config/crd/bases/actions.github.com_ephemeralrunners.yaml index 4712d7662f..f321a85af1 100644 --- a/config/crd/bases/actions.github.com_ephemeralrunners.yaml +++ b/config/crd/bases/actions.github.com_ephemeralrunners.yaml @@ -1094,6 +1094,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1906,6 +1918,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2729,6 +2753,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3052,6 +3088,31 @@ spec: - conditionType type: object type: array + resourceClaims: + description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. 
+ type: string + resourceClaimTemplateName: + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string @@ -3061,6 +3122,21 @@ spec: schedulerName: description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. type: string + schedulingGates: + description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. \n This is an alpha-level feature enabled by PodSchedulingReadiness feature gate." + items: + description: PodSchedulingGate is associated to a Pod to guard its scheduling. + properties: + name: + description: Name of the scheduling gate. 
Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' properties: @@ -3111,7 +3187,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3241,10 +3317,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." 
+ description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. 
If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -3544,7 +3620,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3560,7 +3636,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. 
When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. 
There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3571,6 +3647,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -3578,6 +3657,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml b/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml index 913aee5dc2..d4b2d35130 100644 --- a/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml +++ b/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml @@ -1076,6 +1076,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1888,6 +1900,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. 
Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2711,6 +2735,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3034,6 +3070,31 @@ spec: - conditionType type: object type: array + resourceClaims: + description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." 
+ items: + description: PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. + type: string + resourceClaimTemplateName: + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string @@ -3043,6 +3104,21 @@ spec: schedulerName: description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. type: string + schedulingGates: + description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. \n This is an alpha-level feature enabled by PodSchedulingReadiness feature gate." + items: + description: PodSchedulingGate is associated to a Pod to guard its scheduling. + properties: + name: + description: Name of the scheduling gate. Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' properties: @@ -3093,7 +3169,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3223,10 +3299,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. 
All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -3526,7 +3602,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3542,7 +3618,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3553,6 +3629,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string required: - kind - name @@ -3560,6 +3639,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml b/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml index 0d73224dff..f3254b9876 100644 --- a/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml +++ b/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml @@ -1081,6 +1081,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
+ properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1500,6 +1512,18 @@ spec: dockerdContainerResources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2141,6 +2165,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2963,6 +2999,18 @@ spec: resources: description: 'Compute Resources required by this container. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3256,6 +3304,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3328,7 +3388,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. 
+ description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3873,6 +3933,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -4221,10 +4293,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. 
This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. 
Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -4554,7 +4626,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4570,7 +4642,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. 
This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. 
There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4581,6 +4653,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -4588,6 +4663,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -5216,6 +5303,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml b/config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml index b568548a10..a2e6aba749 100644 --- a/config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml +++ b/config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml @@ -1078,6 +1078,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1497,6 +1509,18 @@ spec: dockerdContainerResources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2138,6 +2162,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2960,6 +2996,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3253,6 +3301,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3325,7 +3385,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3870,6 +3930,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -4218,10 +4290,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. 
All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -4551,7 +4623,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4567,7 +4639,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4578,6 +4650,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ type: string required: - kind - name @@ -4585,6 +4660,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -5213,6 +5300,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/config/crd/bases/actions.summerwind.dev_runners.yaml b/config/crd/bases/actions.summerwind.dev_runners.yaml index 4129a909c4..beaea51b0a 100644 --- a/config/crd/bases/actions.summerwind.dev_runners.yaml +++ b/config/crd/bases/actions.summerwind.dev_runners.yaml @@ -1031,6 +1031,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1450,6 +1462,18 @@ spec: dockerdContainerResources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2091,6 +2115,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2913,6 +2949,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3206,6 +3254,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. 
properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3278,7 +3338,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3823,6 +3883,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. 
\n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -4171,10 +4243,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. 
This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -4504,7 +4576,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' 
+ description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4520,7 +4592,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. 
* While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required. @@ -4531,6 +4603,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -4538,6 +4613,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -5166,6 +5253,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
\n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/config/crd/bases/actions.summerwind.dev_runnersets.yaml b/config/crd/bases/actions.summerwind.dev_runnersets.yaml index 326a4e4433..7cdb81561d 100644 --- a/config/crd/bases/actions.summerwind.dev_runnersets.yaml +++ b/config/crd/bases/actions.summerwind.dev_runnersets.yaml @@ -89,6 +89,14 @@ spec: description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) format: int32 type: integer + ordinals: + description: ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a "0" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha. + properties: + start: + description: 'start is the number representing the first replica''s index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range: [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). If unset, defaults to 0. Replica indices will be in the range: [0, .spec.replicas).' 
+ format: int32 + type: integer + type: object organization: pattern: ^[^/]+$ type: string @@ -152,7 +160,7 @@ spec: description: 'serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where "pod-specific-string" is managed by the StatefulSet controller.' type: string template: - description: template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. + description: template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named "web" with index number "3" would be named "web-3". properties: metadata: description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' @@ -1151,6 +1159,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
+ properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -1963,6 +1983,18 @@ spec: resources: description: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -2786,6 +2818,18 @@ spec: resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -3109,6 +3153,31 @@ spec: - conditionType type: object type: array + resourceClaims: + description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. + type: string + resourceClaimTemplateName: + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. 
\n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: description: 'Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' type: string @@ -3118,6 +3187,21 @@ spec: schedulerName: description: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. type: string + schedulingGates: + description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. \n This is an alpha-level feature enabled by PodSchedulingReadiness feature gate." + items: + description: PodSchedulingGate is associated to a Pod to guard its scheduling. + properties: + name: + description: Name of the scheduling gate. Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: description: 'SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.' properties: @@ -3168,7 +3252,7 @@ spec: - type type: object supplementalGroups: - description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. 
+ description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -3298,10 +3382,10 @@ spec: format: int32 type: integer nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. 
All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. @@ -3601,7 +3685,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' 
+ description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -3617,7 +3701,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. 
* While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required. @@ -3628,6 +3712,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -3635,6 +3722,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -4317,7 +4416,7 @@ spec: type: string type: array dataSource: - description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.' + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4333,7 +4432,7 @@ spec: - name type: object dataSourceRef: - description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. 
* While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. @@ -4344,6 +4443,9 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string required: - kind - name @@ -4351,6 +4453,18 @@ spec: resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: @@ -4493,6 +4607,18 @@ spec: resources: description: ResourceRequirements describes the compute resource requirements. properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array limits: additionalProperties: anyOf: diff --git a/controllers/actions.github.com/autoscalinglistener_controller_test.go b/controllers/actions.github.com/autoscalinglistener_controller_test.go index b16fd7c831..09961efd23 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller_test.go +++ b/controllers/actions.github.com/autoscalinglistener_controller_test.go @@ -11,7 +11,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 2be67c0ef8..a268d560d9 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -219,7 +219,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl // Update the status of autoscaling runner set. if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners { - if err := patch(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { + if err := patchSubResource(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { obj.Status.CurrentRunners = latestRunnerSet.Status.CurrentReplicas }); err != nil { log.Error(err, "Failed to update autoscaling runner set status with current runner count") diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index 460dc1fc29..ca8def027e 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -10,7 +10,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/controllers/actions.github.com/clientutil.go b/controllers/actions.github.com/clientutil.go index e3dfbebb4f..876d8dfd43 100644 --- a/controllers/actions.github.com/clientutil.go +++ b/controllers/actions.github.com/clientutil.go @@ -20,3 +20,13 @@ func patch[T object[T]](ctx context.Context, client patcher, obj T, update func( update(obj) return client.Patch(ctx, obj, kclient.MergeFrom(original)) } + +type subResourcePatcher interface { + Patch(ctx context.Context, obj kclient.Object, patch kclient.Patch, opts ...kclient.SubResourcePatchOption) error +} + +func patchSubResource[T object[T]](ctx context.Context, client subResourcePatcher, obj T, update func(obj T)) error { + original := obj.DeepCopy() + update(obj) + return client.Patch(ctx, obj, kclient.MergeFrom(original)) +} diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go index b6945d2986..7ee546cb07 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller.go +++ b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -369,7 +369,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error { log.Info("Updating ephemeral runner status to Failed") - if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { obj.Status.Phase = corev1.PodFailed obj.Status.Reason = "TooManyPodFailures" obj.Status.Message = "Pod has failed to start more than 5 times" @@ -388,7 +388,7 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR func (r *EphemeralRunnerReconciler) markAsFinished(ctx 
context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error { log.Info("Updating ephemeral runner status to Finished") - if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { obj.Status.Phase = corev1.PodSucceeded }); err != nil { return fmt.Errorf("failed to update ephemeral runner with status finished: %v", err) @@ -409,7 +409,7 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem } log.Info("Updating ephemeral runner status to track the failure count") - if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { if obj.Status.Failures == nil { obj.Status.Failures = make(map[string]bool) } @@ -487,7 +487,7 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id) log.Info("Updating ephemeral runner status with runnerId and runnerJITConfig") - err = patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + err = patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { obj.Status.RunnerId = jitConfig.Runner.Id obj.Status.RunnerName = jitConfig.Runner.Name obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig @@ -556,7 +556,7 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context, } log.Info("Updating ephemeral runner status with pod phase", "phase", pod.Status.Phase, "reason", pod.Status.Reason, "message", pod.Status.Message) - err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { obj.Status.Phase = pod.Status.Phase obj.Status.Ready = 
obj.Status.Ready || (pod.Status.Phase == corev1.PodRunning) obj.Status.Reason = pod.Status.Reason diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go index 749b8b2cf9..f8166abeb2 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go @@ -10,7 +10,7 @@ import ( "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions/fake" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index ed0776a9be..6744e1bad2 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -182,7 +182,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R // Update the status if needed. 
if ephemeralRunnerSet.Status.CurrentReplicas != total { log.Info("Updating status with current runners count", "count", total) - if err := patch(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { + if err := patchSubResource(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { obj.Status.CurrentReplicas = total }); err != nil { log.Error(err, "Failed to update status with current runners count") diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index 095b0ae351..e51d91dd42 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -11,7 +11,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/controllers/actions.github.com/suite_test.go b/controllers/actions.github.com/suite_test.go index 29b07f0de5..c0a12a37dc 100644 --- a/controllers/actions.github.com/suite_test.go +++ b/controllers/actions.github.com/suite_test.go @@ -23,7 +23,7 @@ import ( "github.com/onsi/ginkgo/config" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" @@ -31,7 +31,6 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" // +kubebuilder:scaffold:imports @@ -49,12 +48,10 @@ func TestAPIs(t *testing.T) { config.GinkgoConfig.FocusStrings = append(config.GinkgoConfig.FocusStrings, os.Getenv("GINKGO_FOCUS")) - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) + RunSpecs(t, "Controller Suite") } -var _ = BeforeSuite(func(done Done) { +var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) By("bootstrapping test environment") @@ -80,9 +77,7 @@ var _ = BeforeSuite(func(done Done) { k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).ToNot(HaveOccurred()) Expect(k8sClient).ToNot(BeNil()) - - close(done) -}, 60) +}) var _ = AfterSuite(func() { By("tearing down the test environment") diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go index e3710924d2..5f1f15007c 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go @@ -681,78 +681,80 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr autoscaler.Recorder = mgr.GetEventRecorderFor(name) - if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj client.Object) []string { - hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler) + if err := mgr.GetFieldIndexer().IndexField(context.TODO(), 
&v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, autoscaler.indexer); err != nil { + return err + } + + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.HorizontalRunnerAutoscaler{}). + Named(name). + Complete(autoscaler) +} + +func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client.Object) []string { + hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler) + + if hra.Spec.ScaleTargetRef.Name == "" { + autoscaler.Log.V(1).Info(fmt.Sprintf("scale target ref name not set for hra %s", hra.Name)) + return nil + } - if hra.Spec.ScaleTargetRef.Name == "" { - autoscaler.Log.V(1).Info(fmt.Sprintf("scale target ref name not set for hra %s", hra.Name)) + switch hra.Spec.ScaleTargetRef.Kind { + case "", "RunnerDeployment": + var rd v1alpha1.RunnerDeployment + if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil { + autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerDeployment not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name)) return nil } - switch hra.Spec.ScaleTargetRef.Kind { - case "", "RunnerDeployment": - var rd v1alpha1.RunnerDeployment - if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil { - autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerDeployment not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name)) - return nil - } - - keys := []string{} - if rd.Spec.Template.Spec.Repository != "" { - keys = append(keys, rd.Spec.Template.Spec.Repository) // Repository runners - } - if rd.Spec.Template.Spec.Organization != "" { - if group := rd.Spec.Template.Spec.Group; group != "" { - keys = append(keys, organizationalRunnerGroupKey(rd.Spec.Template.Spec.Organization, rd.Spec.Template.Spec.Group)) // Organization runner groups - } else { - keys = append(keys, 
rd.Spec.Template.Spec.Organization) // Organization runners - } - } - if enterprise := rd.Spec.Template.Spec.Enterprise; enterprise != "" { - if group := rd.Spec.Template.Spec.Group; group != "" { - keys = append(keys, enterpriseRunnerGroupKey(enterprise, rd.Spec.Template.Spec.Group)) // Enterprise runner groups - } else { - keys = append(keys, enterpriseKey(enterprise)) // Enterprise runners - } + keys := []string{} + if rd.Spec.Template.Spec.Repository != "" { + keys = append(keys, rd.Spec.Template.Spec.Repository) // Repository runners + } + if rd.Spec.Template.Spec.Organization != "" { + if group := rd.Spec.Template.Spec.Group; group != "" { + keys = append(keys, organizationalRunnerGroupKey(rd.Spec.Template.Spec.Organization, rd.Spec.Template.Spec.Group)) // Organization runner groups + } else { + keys = append(keys, rd.Spec.Template.Spec.Organization) // Organization runners } - autoscaler.Log.V(2).Info(fmt.Sprintf("HRA keys indexed for HRA %s: %v", hra.Name, keys)) - return keys - case "RunnerSet": - var rs v1alpha1.RunnerSet - if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil { - autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerSet not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name)) - return nil + } + if enterprise := rd.Spec.Template.Spec.Enterprise; enterprise != "" { + if group := rd.Spec.Template.Spec.Group; group != "" { + keys = append(keys, enterpriseRunnerGroupKey(enterprise, rd.Spec.Template.Spec.Group)) // Enterprise runner groups + } else { + keys = append(keys, enterpriseKey(enterprise)) // Enterprise runners } + } + autoscaler.Log.V(2).Info(fmt.Sprintf("HRA keys indexed for HRA %s: %v", hra.Name, keys)) + return keys + case "RunnerSet": + var rs v1alpha1.RunnerSet + if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, 
&rs); err != nil { + autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerSet not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name)) + return nil + } - keys := []string{} - if rs.Spec.Repository != "" { - keys = append(keys, rs.Spec.Repository) // Repository runners - } - if rs.Spec.Organization != "" { - keys = append(keys, rs.Spec.Organization) // Organization runners - if group := rs.Spec.Group; group != "" { - keys = append(keys, organizationalRunnerGroupKey(rs.Spec.Organization, rs.Spec.Group)) // Organization runner groups - } + keys := []string{} + if rs.Spec.Repository != "" { + keys = append(keys, rs.Spec.Repository) // Repository runners + } + if rs.Spec.Organization != "" { + keys = append(keys, rs.Spec.Organization) // Organization runners + if group := rs.Spec.Group; group != "" { + keys = append(keys, organizationalRunnerGroupKey(rs.Spec.Organization, rs.Spec.Group)) // Organization runner groups } - if enterprise := rs.Spec.Enterprise; enterprise != "" { - keys = append(keys, enterpriseKey(enterprise)) // Enterprise runners - if group := rs.Spec.Group; group != "" { - keys = append(keys, enterpriseRunnerGroupKey(enterprise, rs.Spec.Group)) // Enterprise runner groups - } + } + if enterprise := rs.Spec.Enterprise; enterprise != "" { + keys = append(keys, enterpriseKey(enterprise)) // Enterprise runners + if group := rs.Spec.Group; group != "" { + keys = append(keys, enterpriseRunnerGroupKey(enterprise, rs.Spec.Group)) // Enterprise runner groups } - autoscaler.Log.V(2).Info(fmt.Sprintf("HRA keys indexed for HRA %s: %v", hra.Name, keys)) - return keys } - - return nil - }); err != nil { - return err + autoscaler.Log.V(2).Info(fmt.Sprintf("HRA keys indexed for HRA %s: %v", hra.Name, keys)) + return keys } - return ctrl.NewControllerManagedBy(mgr). - For(&v1alpha1.HorizontalRunnerAutoscaler{}). - Named(name). 
- Complete(autoscaler) + return nil } func enterpriseKey(name string) string { diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go index 1675b88be4..20f81f17f2 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go @@ -431,7 +431,11 @@ func testServerWithInitObjs(t *testing.T, eventType string, event interface{}, w hraWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{} - client := fake.NewClientBuilder().WithScheme(sc).WithRuntimeObjects(initObjs...).Build() + client := fake.NewClientBuilder(). + WithScheme(sc). + WithRuntimeObjects(initObjs...). + WithIndex(&actionsv1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, hraWebhook.indexer). + Build() logs := installTestLogger(hraWebhook) diff --git a/controllers/actions.summerwind.net/integration_test.go b/controllers/actions.summerwind.net/integration_test.go index c303eac2e0..d5d967d141 100644 --- a/controllers/actions.summerwind.net/integration_test.go +++ b/controllers/actions.summerwind.net/integration_test.go @@ -18,7 +18,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/controllers/actions.summerwind.net/runnerdeployment_controller_test.go b/controllers/actions.summerwind.net/runnerdeployment_controller_test.go index 1013572a98..5fe065665b 100644 --- a/controllers/actions.summerwind.net/runnerdeployment_controller_test.go +++ b/controllers/actions.summerwind.net/runnerdeployment_controller_test.go @@ -15,7 +15,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" - . 
"github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go b/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go index 13a66343f1..f018a45694 100644 --- a/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go +++ b/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go @@ -11,7 +11,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/controllers/actions.summerwind.net/suite_test.go b/controllers/actions.summerwind.net/suite_test.go index 24aa973f6a..0ef9fce185 100644 --- a/controllers/actions.summerwind.net/suite_test.go +++ b/controllers/actions.summerwind.net/suite_test.go @@ -23,7 +23,7 @@ import ( "github.com/onsi/ginkgo/config" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1" @@ -31,7 +31,6 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" // +kubebuilder:scaffold:imports @@ -49,12 +48,10 @@ func TestAPIs(t *testing.T) { config.GinkgoConfig.FocusStrings = append(config.GinkgoConfig.FocusStrings, os.Getenv("GINKGO_FOCUS")) - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) + RunSpecs(t, "Controller Suite") } -var _ = BeforeSuite(func(done Done) { +var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) By("bootstrapping test environment") @@ -80,9 +77,7 @@ var _ = BeforeSuite(func(done Done) { k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).ToNot(HaveOccurred()) Expect(k8sClient).ToNot(BeNil()) - - close(done) -}, 60) +}) var _ = AfterSuite(func() { By("tearing down the test environment") diff --git a/go.mod b/go.mod index 1f7270245c..237baa21f4 100644 --- a/go.mod +++ b/go.mod @@ -17,40 +17,37 @@ require ( github.com/hashicorp/go-retryablehttp v0.7.2 github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 + github.com/onsi/ginkgo/v2 v2.7.0 github.com/onsi/gomega v1.25.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.13.0 + github.com/prometheus/client_golang v1.14.0 github.com/stretchr/testify v1.8.0 github.com/teambition/rrule-go v1.8.0 go.uber.org/multierr v1.7.0 - go.uber.org/zap v1.23.0 + go.uber.org/zap v1.24.0 golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 gomodules.xyz/jsonpatch/v2 v2.2.0 - k8s.io/api v0.25.2 - k8s.io/apimachinery v0.25.2 - k8s.io/client-go v0.25.2 - sigs.k8s.io/controller-runtime v0.13.0 + 
k8s.io/api v0.26.0 + k8s.io/apimachinery v0.26.0 + k8s.io/client-go v0.26.0 + sigs.k8s.io/controller-runtime v0.14.1 sigs.k8s.io/yaml v1.3.0 ) require ( - cloud.google.com/go/compute v1.12.1 // indirect - cloud.google.com/go/compute/metadata v0.2.1 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/aws/aws-sdk-go v1.40.56 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect github.com/go-logr/zapr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.19.14 // indirect github.com/go-sql-driver/mysql v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -70,16 +67,15 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nxadm/tail v1.4.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pquerna/otp v1.2.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect @@ -87,23 +83,22 @@ require ( github.com/stretchr/objx v0.4.0 // indirect github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/crypto v0.0.0-20220824171710-5757bc0c5503 // indirect + golang.org/x/crypto v0.1.0 // indirect golang.org/x/net v0.5.0 // indirect golang.org/x/sys v0.4.0 // indirect golang.org/x/term v0.4.0 // indirect golang.org/x/text v0.6.0 // indirect - golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect + golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.25.0 // indirect - k8s.io/component-base v0.25.0 // indirect - k8s.io/klog/v2 v2.70.1 // indirect - k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect - k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect + k8s.io/apiextensions-apiserver v0.26.0 // indirect + k8s.io/component-base v0.26.0 // indirect + k8s.io/klog/v2 v2.80.1 // indirect + k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect + k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index c71112be30..64d3957d7b 100644 --- a/go.sum +++ b/go.sum 
@@ -19,10 +19,6 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -37,10 +33,6 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI= 
github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -80,8 +72,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -94,8 +86,8 @@ github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2Vvl github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= 
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= @@ -112,7 +104,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -122,8 +113,8 @@ github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= 
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -270,8 +261,8 @@ github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb44 github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg= github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= +github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= @@ -290,13 +281,13 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod 
h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= +github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= @@ -314,13 +305,14 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= @@ -342,7 +334,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= @@ -375,13 +366,13 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= 
-go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -389,8 +380,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220824171710-5757bc0c5503 h1:vJ2V3lFLg+bBhgroYuRfyN583UzVveQmIXjc8T/y3to= -golang.org/x/crypto v0.0.0-20220824171710-5757bc0c5503/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -436,7 +427,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -525,7 +515,7 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -544,8 +534,8 @@ golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time 
v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -686,7 +676,6 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -708,28 +697,27 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8= -k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0= -k8s.io/apiextensions-apiserver v0.25.0 h1:CJ9zlyXAbq0FIW8CD7HHyozCMBpDSiH7EdrSTCZcZFY= 
-k8s.io/apiextensions-apiserver v0.25.0/go.mod h1:3pAjZiN4zw7R8aZC5gR0y3/vCkGlAjCazcg1me8iB/E= -k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs= -k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= -k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= -k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= -k8s.io/component-base v0.25.0 h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y= -k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yAFxk= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= +k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= +k8s.io/apiextensions-apiserver v0.26.0 h1:Gy93Xo1eg2ZIkNX/8vy5xviVSxwQulsnUdQ00nEdpDo= +k8s.io/apiextensions-apiserver v0.26.0/go.mod h1:7ez0LTiyW5nq3vADtK6C3kMESxadD51Bh6uz3JOlqWQ= +k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= +k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= +k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= +k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= +k8s.io/component-base v0.26.0 h1:0IkChOCohtDHttmKuz+EP3j3+qKmV55rM9gIFTXA7Vs= +k8s.io/component-base v0.26.0/go.mod 
h1:lqHwlfV1/haa14F/Z5Zizk5QmzaVf23nQzCwVOQpfC8= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= +k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRMc2WIQ= -sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= +sigs.k8s.io/controller-runtime v0.14.1 h1:vThDes9pzg0Y+UbCPY3Wj34CGIYPgdmspPm2GIpxpzM= +sigs.k8s.io/controller-runtime v0.14.1/go.mod h1:GaRkrY8a7UZF0kqFFbUKG7n9ICiTY5T55P1RiE3UZlU= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= From be80b83e43fe09b437bff78c94375fc6a2df82e0 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Fri, 27 Jan 2023 09:27:52 -0500 Subject: [PATCH 046/561] Allow update runner group for AutoScalingRunnerSet (#2216) --- .../autoscalingrunnerset_controller.go | 58 +++++++++++++++++-- .../autoscalingrunnerset_controller_test.go | 9 ++- github/actions/client.go | 42 ++++++++++++++ .../actions/client_runner_scale_set_test.go | 50 ++++++++++++++++ github/actions/fake/client.go | 21 
+++++++ github/actions/mock_ActionsService.go | 23 ++++++++ 6 files changed, 196 insertions(+), 7 deletions(-) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index a268d560d9..bae05cdf1b 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -21,6 +21,7 @@ import ( "fmt" "sort" "strconv" + "strings" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/github/actions" @@ -47,6 +48,7 @@ const ( LabelKeyAutoScaleRunnerSetName = "auto-scale-runner-set-name" autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" runnerScaleSetIdKey = "runner-scale-set-id" + runnerScaleSetRunnerGroupNameKey = "runner-scale-set-runner-group-name" // scaleSetListenerLabel is the key of pod.meta.labels to label // that the pod is a listener application @@ -150,6 +152,13 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log) } + // Make sure the runner group of the scale set is up to date + currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetRunnerGroupNameKey] + if !ok || !strings.EqualFold(currentRunnerGroupName, autoscalingRunnerSet.Spec.RunnerGroup) { + log.Info("AutoScalingRunnerSet runner group changed. 
Updating the runner scale set.") + return r.updateRunnerScaleSetRunnerGroup(ctx, autoscalingRunnerSet, log) + } + secret := new(corev1.Secret) if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, secret); err != nil { log.Error(err, "Failed to find GitHub config secret.", @@ -299,8 +308,8 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex return ctrl.Result{}, err } + runnerGroupId := 1 if runnerScaleSet == nil { - runnerGroupId := 1 if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 { runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup) if err != nil { @@ -338,10 +347,12 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex autoscalingRunnerSet.Annotations = map[string]string{} } - autoscalingRunnerSet.Annotations[runnerScaleSetIdKey] = strconv.Itoa(runnerScaleSet.Id) - logger.Info("Adding runner scale set ID as an annotation") - if err := r.Update(ctx, autoscalingRunnerSet); err != nil { - logger.Error(err, "Failed to add runner scale set ID") + logger.Info("Adding runner scale set ID and runner group name as an annotation") + if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { + obj.Annotations[runnerScaleSetIdKey] = strconv.Itoa(runnerScaleSet.Id) + obj.Annotations[runnerScaleSetRunnerGroupNameKey] = runnerScaleSet.RunnerGroupName + }); err != nil { + logger.Error(err, "Failed to add runner scale set ID and runner group name as an annotation") return ctrl.Result{}, err } @@ -349,6 +360,43 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex return ctrl.Result{}, nil } +func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { + runnerScaleSetId, err := 
strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) + if err != nil { + logger.Error(err, "Failed to parse runner scale set ID") + return ctrl.Result{}, err + } + + actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) + if err != nil { + logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set") + return ctrl.Result{}, err + } + + runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup) + if err != nil { + logger.Error(err, "Failed to get runner group by name", "runnerGroup", autoscalingRunnerSet.Spec.RunnerGroup) + return ctrl.Result{}, err + } + + updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetId, &actions.RunnerScaleSet{Name: autoscalingRunnerSet.Name, RunnerGroupId: int(runnerGroup.ID)}) + if err != nil { + logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetId) + return ctrl.Result{}, err + } + + logger.Info("Updating runner scale set runner group name as an annotation") + if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { + obj.Annotations[runnerScaleSetRunnerGroupNameKey] = updatedRunnerScaleSet.RunnerGroupName + }); err != nil { + logger.Error(err, "Failed to update runner group name annotation") + return ctrl.Result{}, err + } + + logger.Info("Updated runner scale set with match runner group", "runnerGroup", updatedRunnerScaleSet.RunnerGroupName) + return ctrl.Result{}, nil +} + func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) { desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet) if err != nil { diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index 
ca8def027e..d281d8ccc7 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -83,6 +83,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { GitHubConfigSecret: configSecret.Name, MaxRunners: &max, MinRunners: &min, + RunnerGroup: "testgroup", Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -144,10 +145,14 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { return "", nil } - return created.Annotations[runnerScaleSetIdKey], nil + if _, ok := created.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok { + return "", nil + } + + return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIdKey], created.Annotations[runnerScaleSetRunnerGroupNameKey]), nil }, autoscalingRunnerSetTestTimeout, - autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("1"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation") + autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("1_testgroup"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation") // Check if ephemeral runner set is created Eventually( diff --git a/github/actions/client.go b/github/actions/client.go index 48ab4a213d..fdadc70b8c 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -36,6 +36,7 @@ type ActionsService interface { GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int) (*RunnerScaleSet, error) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*RunnerGroup, error) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) + UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*RunnerScaleSetSession, error) 
DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error @@ -350,6 +351,47 @@ func (c *Client) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *Runne return createdRunnerScaleSet, nil } +func (c *Client) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) { + u := fmt.Sprintf("%s/%s/%d?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) + + if err := c.refreshTokenIfNeeded(ctx); err != nil { + return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + } + + body, err := json.Marshal(runnerScaleSet) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPatch, u, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + resp, err := c.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, ParseActionsErrorFromResponse(resp) + } + + var updatedRunnerScaleSet *RunnerScaleSet + err = unmarshalBody(resp, &updatedRunnerScaleSet) + if err != nil { + return nil, err + } + return updatedRunnerScaleSet, nil +} + func (c *Client) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int) error { u := fmt.Sprintf("%s/%s/%d?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) diff --git a/github/actions/client_runner_scale_set_test.go b/github/actions/client_runner_scale_set_test.go index e9d86e2d8b..980b9846d3 100644 --- a/github/actions/client_runner_scale_set_test.go +++ b/github/actions/client_runner_scale_set_test.go @@ -336,3 +336,53 @@ func TestCreateRunnerScaleSet(t *testing.T) { assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the 
first request but got: %v", actualRetry) }) } + +func TestUpdateRunnerScaleSet(t *testing.T) { + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + + scaleSetCreationDateTime := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) + runnerScaleSet := actions.RunnerScaleSet{Id: 1, Name: "ScaleSet", RunnerGroupId: 1, RunnerGroupName: "group", CreatedOn: scaleSetCreationDateTime, RunnerSetting: actions.RunnerSetting{}} + + t.Run("Update runner scale set", func(t *testing.T) { + want := &runnerScaleSet + rsl, err := json.Marshal(want) + require.NoError(t, err) + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Write(rsl) + })) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + got, err := client.UpdateRunnerScaleSet(ctx, 1, &actions.RunnerScaleSet{RunnerGroupId: 1}) + require.NoError(t, err) + assert.Equal(t, want, got) + }) + + t.Run("UpdateRunnerScaleSet calls correct url", func(t *testing.T) { + rsl, err := json.Marshal(&runnerScaleSet) + require.NoError(t, err) + url := url.URL{} + method := "" + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(rsl) + url = *r.URL + method = r.Method + })) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.UpdateRunnerScaleSet(ctx, 1, &runnerScaleSet) + require.NoError(t, err) + + u := url.String() + expectedUrl := "/_apis/runtime/runnerscalesets/1?api-version=6.0-preview" + assert.Equal(t, expectedUrl, u) + + assert.Equal(t, "PATCH", method) + }) +} diff --git a/github/actions/fake/client.go b/github/actions/fake/client.go index fc2b75fb6c..1983180e5a 100644 --- a/github/actions/fake/client.go +++ b/github/actions/fake/client.go @@ -36,6 +36,18 @@ var defaultRunnerScaleSet = &actions.RunnerScaleSet{ Statistics: nil, } +var defaultUpdatedRunnerScaleSet = 
&actions.RunnerScaleSet{ + Id: 1, + Name: "testset", + RunnerGroupId: 2, + RunnerGroupName: "testgroup", + Labels: []actions.Label{{Type: "test", Name: "test"}}, + RunnerSetting: actions.RunnerSetting{}, + CreatedOn: time.Now(), + RunnerJitConfigUrl: "test.test.test", + Statistics: nil, +} + var defaultRunnerGroup = &actions.RunnerGroup{ ID: 1, Name: "testgroup", @@ -107,6 +119,10 @@ type FakeClient struct { *actions.RunnerScaleSet err error } + updateRunnerScaleSetResult struct { + *actions.RunnerScaleSet + err error + } createMessageSessionResult struct { *actions.RunnerScaleSetSession err error @@ -164,6 +180,7 @@ func (f *FakeClient) applyDefaults() { f.getRunnerScaleSetByIdResult.RunnerScaleSet = defaultRunnerScaleSet f.getRunnerGroupByNameResult.RunnerGroup = defaultRunnerGroup f.createRunnerScaleSetResult.RunnerScaleSet = defaultRunnerScaleSet + f.updateRunnerScaleSetResult.RunnerScaleSet = defaultUpdatedRunnerScaleSet f.createMessageSessionResult.RunnerScaleSetSession = defaultRunnerScaleSetSession f.refreshMessageSessionResult.RunnerScaleSetSession = defaultRunnerScaleSetSession f.acquireJobsResult.ids = []int64{1} @@ -190,6 +207,10 @@ func (f *FakeClient) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *a return f.createRunnerScaleSetResult.RunnerScaleSet, f.createRunnerScaleSetResult.err } +func (f *FakeClient) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, runnerScaleSet *actions.RunnerScaleSet) (*actions.RunnerScaleSet, error) { + return f.updateRunnerScaleSetResult.RunnerScaleSet, f.updateRunnerScaleSetResult.err +} + func (f *FakeClient) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) { return f.createMessageSessionResult.RunnerScaleSetSession, f.createMessageSessionResult.err } diff --git a/github/actions/mock_ActionsService.go b/github/actions/mock_ActionsService.go index 341dc5133b..d227e8be3e 100644 --- a/github/actions/mock_ActionsService.go +++ 
b/github/actions/mock_ActionsService.go @@ -332,6 +332,29 @@ func (_m *MockActionsService) RemoveRunner(ctx context.Context, runnerId int64) return r0 } +// UpdateRunnerScaleSet provides a mock function with given fields: ctx, runnerScaleSetId, runnerScaleSet +func (_m *MockActionsService) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) { + ret := _m.Called(ctx, runnerScaleSetId, runnerScaleSet) + + var r0 *RunnerScaleSet + if rf, ok := ret.Get(0).(func(context.Context, int, *RunnerScaleSet) *RunnerScaleSet); ok { + r0 = rf(ctx, runnerScaleSetId, runnerScaleSet) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*RunnerScaleSet) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int, *RunnerScaleSet) error); ok { + r1 = rf(ctx, runnerScaleSetId, runnerScaleSet) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + type mockConstructorTestingTNewMockActionsService interface { mock.TestingT Cleanup(func()) From 2307caffe2e50de65c5a61483fb7d5d024e0d0bc Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Fri, 27 Jan 2023 15:17:28 +0000 Subject: [PATCH 047/561] Update Validate ARC workflow to go 1.19 (#2220) --- .github/workflows/validate-arc.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/validate-arc.yaml b/.github/workflows/validate-arc.yaml index 73b5238ae4..e487f5fb1a 100644 --- a/.github/workflows/validate-arc.yaml +++ b/.github/workflows/validate-arc.yaml @@ -34,9 +34,9 @@ jobs: - name: Set-up Go uses: actions/setup-go@v3 with: - go-version: '1.18.2' + go-version: '1.19' check-latest: false - + - uses: actions/cache@v3 with: path: ~/go/pkg/mod From fc18e21abb6e260667bf898bc10de26eaa70be42 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Mon, 30 Jan 2023 08:37:26 -0500 Subject: [PATCH 048/561] Fix helm charts when pass values file. 
(#2222) --- charts/.ci/ct-config.yaml | 1 + .../templates/autoscalingrunnerset.yaml | 10 +++---- .../tests/template_test.go | 27 +++++++++++++++++++ .../auto-scaling-runner-set/tests/values.yaml | 5 ++++ 4 files changed, 38 insertions(+), 5 deletions(-) create mode 100644 charts/auto-scaling-runner-set/tests/values.yaml diff --git a/charts/.ci/ct-config.yaml b/charts/.ci/ct-config.yaml index 38351c90b7..28cbc0ab26 100644 --- a/charts/.ci/ct-config.yaml +++ b/charts/.ci/ct-config.yaml @@ -1,4 +1,5 @@ # This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow +all: true lint-conf: charts/.ci/lint-config.yaml chart-repos: - jetstack=https://charts.jetstack.io diff --git a/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml b/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml index 7794d4b321..e29ec157c5 100644 --- a/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml +++ b/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml @@ -12,21 +12,21 @@ spec: runnerGroup: {{ . 
}} {{- end }} - {{- if and (kindIs "int64" .Values.minRunners) (kindIs "int64" .Values.maxRunners) }} + {{- if and (or (kindIs "int64" .Values.minRunners) (kindIs "float64" .Values.minRunners)) (or (kindIs "int64" .Values.maxRunners) (kindIs "float64" .Values.maxRunners)) }} {{- if gt .Values.minRunners .Values.maxRunners }} {{- fail "maxRunners has to be greater or equal to minRunners" }} {{- end }} {{- end }} - {{- if kindIs "int64" .Values.maxRunners }} - {{- if lt .Values.maxRunners 0 }} + {{- if or (kindIs "int64" .Values.maxRunners) (kindIs "float64" .Values.maxRunners) }} + {{- if lt (.Values.maxRunners | int) 0 }} {{- fail "maxRunners has to be greater or equal to 0" }} {{- end }} maxRunners: {{ .Values.maxRunners | int }} {{- end }} - {{- if kindIs "int64" .Values.minRunners }} - {{- if lt .Values.minRunners 0 }} + {{- if or (kindIs "int64" .Values.minRunners) (kindIs "float64" .Values.minRunners) }} + {{- if lt (.Values.minRunners | int) 0 }} {{- fail "minRunners has to be greater or equal to 0" }} {{- end }} minRunners: {{ .Values.minRunners | int }} diff --git a/charts/auto-scaling-runner-set/tests/template_test.go b/charts/auto-scaling-runner-set/tests/template_test.go index 51a8840a10..099b24b601 100644 --- a/charts/auto-scaling-runner-set/tests/template_test.go +++ b/charts/auto-scaling-runner-set/tests/template_test.go @@ -495,6 +495,33 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil") } +func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunners_FromValuesFile(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + testValuesPath, err := filepath.Abs("../tests/values.yaml") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + 
ValuesFiles: []string{testValuesPath}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, 5, *ars.Spec.MinRunners, "MinRunners should be 5") + assert.Equal(t, 10, *ars.Spec.MaxRunners, "MaxRunners should be 10") +} + func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { t.Parallel() diff --git a/charts/auto-scaling-runner-set/tests/values.yaml b/charts/auto-scaling-runner-set/tests/values.yaml new file mode 100644 index 0000000000..fc42555e78 --- /dev/null +++ b/charts/auto-scaling-runner-set/tests/values.yaml @@ -0,0 +1,5 @@ +githubConfigUrl: https://github.com/actions/actions-runner-controller +githubConfigSecret: + github_token: test +maxRunners: 10 +minRunners: 5 \ No newline at end of file From 9eee32cc98be8d1afc4eddb0379e04bd2c546ccd Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Mon, 30 Jan 2023 14:03:30 -0500 Subject: [PATCH 049/561] Skip CT when list-changed=false. 
(#2228) --- .github/workflows/validate-chart.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/validate-chart.yaml b/.github/workflows/validate-chart.yaml index 99fd267dfe..6eca19f466 100644 --- a/.github/workflows/validate-chart.yaml +++ b/.github/workflows/validate-chart.yaml @@ -78,5 +78,6 @@ jobs: helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait - name: Run chart-testing (install) + if: steps.list-changed.outputs.changed == 'true' run: | ct install --config charts/.ci/ct-config.yaml From 3cc5cce56463f3ddd00f101888d24339779da84b Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Tue, 31 Jan 2023 10:55:23 +0000 Subject: [PATCH 050/561] Remove network requests from actions.NewClient (#2219) Co-authored-by: Nikola Jokic --- cmd/githubrunnerscalesetlistener/main.go | 1 - github/actions/actions_server_test.go | 63 ++- github/actions/byte_order_mark_test.go | 61 ++ github/actions/client.go | 533 ++++++------------ github/actions/client_generate_jit_test.go | 3 +- github/actions/client_job_acquisition_test.go | 28 +- .../client_runner_scale_set_message_test.go | 38 +- .../client_runner_scale_set_session_test.go | 8 +- .../actions/client_runner_scale_set_test.go | 69 +-- github/actions/client_runner_test.go | 17 +- github/actions/client_tls_test.go | 31 +- github/actions/config.go | 98 ++++ github/actions/config_test.go | 117 ++++ github/actions/github_api_request_test.go | 171 ++++++ github/actions/multi_client.go | 1 - github/actions/url_test.go | 48 -- 16 files changed, 782 insertions(+), 505 deletions(-) create mode 100644 github/actions/byte_order_mark_test.go create mode 100644 github/actions/config.go create mode 100644 github/actions/config_test.go create mode 100644 github/actions/github_api_request_test.go delete mode 100644 github/actions/url_test.go diff --git a/cmd/githubrunnerscalesetlistener/main.go b/cmd/githubrunnerscalesetlistener/main.go index 0668459778..ed9f145f33 100644 --- 
a/cmd/githubrunnerscalesetlistener/main.go +++ b/cmd/githubrunnerscalesetlistener/main.go @@ -85,7 +85,6 @@ func run(rc RunnerScaleSetListenerConfig, logger logr.Logger) error { } actionsServiceClient, err := actions.NewClient( - ctx, rc.ConfigureUrl, creds, actions.WithUserAgent(fmt.Sprintf("actions-runner-controller/%s", build.Version)), diff --git a/github/actions/actions_server_test.go b/github/actions/actions_server_test.go index 8b66e45ff6..2638435ace 100644 --- a/github/actions/actions_server_test.go +++ b/github/actions/actions_server_test.go @@ -17,9 +17,20 @@ import ( // /actions/runner-registration endpoints will be handled by the provided // handler. The returned server is started and will be automatically closed // when the test ends. -func newActionsServer(t *testing.T, handler http.Handler) *actionsServer { - var u string - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +func newActionsServer(t *testing.T, handler http.Handler, options ...actionsServerOption) *actionsServer { + s := httptest.NewServer(nil) + server := &actionsServer{ + Server: s, + } + t.Cleanup(func() { + server.Close() + }) + + for _, option := range options { + option(server) + } + + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // handle getRunnerRegistrationToken if strings.HasSuffix(r.URL.Path, "/runners/registration-token") { w.WriteHeader(http.StatusCreated) @@ -29,41 +40,55 @@ func newActionsServer(t *testing.T, handler http.Handler) *actionsServer { // handle getActionsServiceAdminConnection if strings.HasSuffix(r.URL.Path, "/actions/runner-registration") { - claims := &jwt.RegisteredClaims{ - IssuedAt: jwt.NewNumericDate(time.Now().Add(-1 * time.Minute)), - ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Minute)), - Issuer: "123", + if server.token == "" { + server.token = defaultActionsToken(t) } - token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) - privateKey, err := 
jwt.ParseRSAPrivateKeyFromPEM([]byte(samplePrivateKey)) - require.NoError(t, err) - tokenString, err := token.SignedString(privateKey) - require.NoError(t, err) - w.Write([]byte(`{"url":"` + u + `","token":"` + tokenString + `"}`)) + w.Write([]byte(`{"url":"` + s.URL + `/tenant/123/","token":"` + server.token + `"}`)) return } handler.ServeHTTP(w, r) - })) + }) - u = server.URL + server.Config.Handler = h - t.Cleanup(func() { - server.Close() - }) + return server +} - return &actionsServer{server} +type actionsServerOption func(*actionsServer) + +func withActionsToken(token string) actionsServerOption { + return func(s *actionsServer) { + s.token = token + } } type actionsServer struct { *httptest.Server + + token string } func (s *actionsServer) configURLForOrg(org string) string { return s.URL + "/" + org } +func defaultActionsToken(t *testing.T) string { + claims := &jwt.RegisteredClaims{ + IssuedAt: jwt.NewNumericDate(time.Now().Add(-10 * time.Minute)), + ExpiresAt: jwt.NewNumericDate(time.Now().Add(10 * time.Minute)), + Issuer: "123", + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) + privateKey, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(samplePrivateKey)) + require.NoError(t, err) + tokenString, err := token.SignedString(privateKey) + require.NoError(t, err) + return tokenString +} + const samplePrivateKey = `-----BEGIN RSA PRIVATE KEY----- MIICWgIBAAKBgHXfRT9cv9UY9fAAD4+1RshpfSSZe277urfEmPfX3/Og9zJYRk// CZrJVD1CaBZDiIyQsNEzjta7r4UsqWdFOggiNN2E7ZTFQjMSaFkVgrzHqWuiaCBf diff --git a/github/actions/byte_order_mark_test.go b/github/actions/byte_order_mark_test.go new file mode 100644 index 0000000000..107dd92a6e --- /dev/null +++ b/github/actions/byte_order_mark_test.go @@ -0,0 +1,61 @@ +package actions_test + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestClient_Do(t 
*testing.T) { + t.Run("trims byte order mark from response if present", func(t *testing.T) { + t.Run("when there is no body", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + })) + defer server.Close() + + client, err := actions.NewClient("https://localhost/org/repo", &actions.ActionsAuth{Token: "token"}) + require.NoError(t, err) + + req, err := http.NewRequest("GET", server.URL, nil) + require.NoError(t, err) + + resp, err := client.Do(req) + require.NoError(t, err) + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + assert.Empty(t, string(body)) + }) + + responses := []string{ + "\xef\xbb\xbf{\"foo\":\"bar\"}", + "{\"foo\":\"bar\"}", + } + + for _, response := range responses { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(response)) + })) + defer server.Close() + + client, err := actions.NewClient("https://localhost/org/repo", &actions.ActionsAuth{Token: "token"}) + require.NoError(t, err) + + req, err := http.NewRequest("GET", server.URL, nil) + require.NoError(t, err) + + resp, err := client.Do(req) + require.NoError(t, err) + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + assert.Equal(t, "{\"foo\":\"bar\"}", string(body)) + } + }) +} diff --git a/github/actions/client.go b/github/actions/client.go index fdadc70b8c..70b02b1489 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -12,9 +12,7 @@ import ( "log" "net/http" "net/url" - "path" "strconv" - "strings" "sync" "time" @@ -62,17 +60,17 @@ type Client struct { mu sync.Mutex // TODO: Convert to unexported fields once refactor of Listener is complete - ActionsServiceAdminToken *string - ActionsServiceAdminTokenExpiresAt *time.Time - ActionsServiceURL *string + ActionsServiceAdminToken string + ActionsServiceAdminTokenExpiresAt time.Time + ActionsServiceURL string retryMax 
int retryWaitMax time.Duration - creds *ActionsAuth - githubConfigURL string - logger logr.Logger - userAgent string + creds *ActionsAuth + config *GitHubConfig + logger logr.Logger + userAgent string rootCAs *x509.CertPool tlsInsecureSkipVerify bool @@ -116,11 +114,16 @@ func WithoutTLSVerify() ClientOption { } } -func NewClient(ctx context.Context, githubConfigURL string, creds *ActionsAuth, options ...ClientOption) (ActionsService, error) { +func NewClient(githubConfigURL string, creds *ActionsAuth, options ...ClientOption) (*Client, error) { + config, err := ParseGitHubConfigFromURL(githubConfigURL) + if err != nil { + return nil, fmt.Errorf("failed to parse githubConfigURL: %w", err) + } + ac := &Client{ - creds: creds, - githubConfigURL: githubConfigURL, - logger: logr.Discard(), + creds: creds, + config: config, + logger: logr.Discard(), // retryablehttp defaults retryMax: 4, @@ -132,9 +135,6 @@ func NewClient(ctx context.Context, githubConfigURL string, creds *ActionsAuth, } retryClient := retryablehttp.NewClient() - - // TODO: this silences retryclient default logger, do we want to provide one - // instead? 
by default retryablehttp logs all requests to stderr retryClient.Logger = log.New(io.Discard, "", log.LstdFlags) retryClient.RetryMax = ac.retryMax @@ -161,48 +161,94 @@ func NewClient(ctx context.Context, githubConfigURL string, creds *ActionsAuth, retryClient.HTTPClient.Transport = transport ac.Client = retryClient.StandardClient() - rt, err := ac.getRunnerRegistrationToken(ctx, githubConfigURL, *creds) + return ac, nil +} + +func (c *Client) Do(req *http.Request) (*http.Response, error) { + resp, err := c.Client.Do(req) if err != nil { - return nil, fmt.Errorf("failed to get runner registration token: %w", err) + return nil, err } - adminConnInfo, err := ac.getActionsServiceAdminConnection(ctx, rt, githubConfigURL) + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + err = resp.Body.Close() if err != nil { - return nil, fmt.Errorf("failed to get actions service admin connection: %w", err) + return nil, err } - ac.ActionsServiceURL = adminConnInfo.ActionsServiceUrl + body = trimByteOrderMark(body) + resp.Body = io.NopCloser(bytes.NewReader(body)) + return resp, nil +} - ac.mu.Lock() - defer ac.mu.Unlock() - ac.ActionsServiceAdminToken = adminConnInfo.AdminToken - ac.ActionsServiceAdminTokenExpiresAt, err = actionsServiceAdminTokenExpiresAt(*adminConnInfo.AdminToken) +func (c *Client) NewGitHubAPIRequest(ctx context.Context, method, path string, body io.Reader) (*http.Request, error) { + u := c.config.GitHubAPIURL(path) + req, err := http.NewRequestWithContext(ctx, method, u.String(), body) if err != nil { - return nil, fmt.Errorf("failed to get admin token expire at: %w", err) + return nil, err } - return ac, nil + if c.userAgent != "" { + req.Header.Set("User-Agent", c.userAgent) + } + + return req, nil } -func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*RunnerScaleSet, error) { - u := fmt.Sprintf("%s/%s?name=%s&api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetName) 
+func (c *Client) NewActionsServiceRequest(ctx context.Context, method, path string, body io.Reader) (*http.Request, error) { + err := c.updateTokenIfNeeded(ctx) + if err != nil { + return nil, err + } - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) + parsedPath, err := url.Parse(path) + if err != nil { + return nil, err } - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + urlString, err := url.JoinPath(c.ActionsServiceURL, parsedPath.Path) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) + u, err := url.Parse(urlString) + if err != nil { + return nil, err + } + + q := u.Query() + for k, v := range parsedPath.Query() { + q[k] = v + } + if q.Get("api-version") == "" { + q.Set("api-version", "6.0-preview") + } + u.RawQuery = q.Encode() + + req, err := http.NewRequestWithContext(ctx, method, u.String(), body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.ActionsServiceAdminToken)) if c.userAgent != "" { req.Header.Set("User-Agent", c.userAgent) } + return req, nil +} + +func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*RunnerScaleSet, error) { + path := fmt.Sprintf("/%s?name=%s", scaleSetEndpoint, runnerScaleSetName) + req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + resp, err := c.Do(req) if err != nil { return nil, err @@ -211,8 +257,9 @@ func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName strin if resp.StatusCode != http.StatusOK { return nil, ParseActionsErrorFromResponse(resp) } + var runnerScaleSetList *runnerScaleSetsResponse - err = unmarshalBody(resp, &runnerScaleSetList) + err = 
json.NewDecoder(resp.Body).Decode(&runnerScaleSetList) if err != nil { return nil, err } @@ -227,24 +274,12 @@ func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName strin } func (c *Client) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int) (*RunnerScaleSet, error) { - u := fmt.Sprintf("%s/%s/%d?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) - - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + path := fmt.Sprintf("/%s/%d", scaleSetEndpoint, runnerScaleSetId) + req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return nil, err @@ -255,7 +290,7 @@ func (c *Client) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int } var runnerScaleSet *RunnerScaleSet - err = unmarshalBody(resp, &runnerScaleSet) + err = json.NewDecoder(resp.Body).Decode(&runnerScaleSet) if err != nil { return nil, err } @@ -263,24 +298,12 @@ func (c *Client) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int } func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*RunnerGroup, error) { - u := fmt.Sprintf("%s/_apis/runtime/runnergroups/?groupName=%s&api-version=6.0-preview", *c.ActionsServiceURL, runnerGroup) - - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + path := fmt.Sprintf("/_apis/runtime/runnergroups/?groupName=%s", runnerGroup) + req, err := 
c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return nil, err @@ -295,7 +318,7 @@ func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) ( } var runnerGroupList *RunnerGroupList - err = unmarshalBody(resp, &runnerGroupList) + err = json.NewDecoder(resp.Body).Decode(&runnerGroupList) if err != nil { return nil, err } @@ -312,29 +335,16 @@ func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) ( } func (c *Client) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) { - u := fmt.Sprintf("%s/%s?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint) - - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) - } - body, err := json.Marshal(runnerScaleSet) if err != nil { return nil, err } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, bytes.NewBuffer(body)) + req, err := c.NewActionsServiceRequest(ctx, http.MethodPost, scaleSetEndpoint, bytes.NewReader(body)) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return nil, err @@ -344,7 +354,7 @@ func (c *Client) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *Runne return nil, ParseActionsErrorFromResponse(resp) } var createdRunnerScaleSet *RunnerScaleSet - err = unmarshalBody(resp, &createdRunnerScaleSet) + err = json.NewDecoder(resp.Body).Decode(&createdRunnerScaleSet) if err != 
nil { return nil, err } @@ -352,29 +362,18 @@ func (c *Client) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *Runne } func (c *Client) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) { - u := fmt.Sprintf("%s/%s/%d?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) - - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) - } + path := fmt.Sprintf("%s/%d", scaleSetEndpoint, runnerScaleSetId) body, err := json.Marshal(runnerScaleSet) if err != nil { return nil, err } - req, err := http.NewRequestWithContext(ctx, http.MethodPatch, u, bytes.NewBuffer(body)) + req, err := c.NewActionsServiceRequest(ctx, http.MethodPatch, path, bytes.NewReader(body)) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return nil, err @@ -385,7 +384,7 @@ func (c *Client) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, } var updatedRunnerScaleSet *RunnerScaleSet - err = unmarshalBody(resp, &updatedRunnerScaleSet) + err = json.NewDecoder(resp.Body).Decode(&updatedRunnerScaleSet) if err != nil { return nil, err } @@ -393,24 +392,12 @@ func (c *Client) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, } func (c *Client) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int) error { - u := fmt.Sprintf("%s/%s/%d?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) - - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return fmt.Errorf("failed to refresh admin token if needed: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, http.MethodDelete, u, nil) + path := fmt.Sprintf("/%s/%d", 
scaleSetEndpoint, runnerScaleSetId) + req, err := c.NewActionsServiceRequest(ctx, http.MethodDelete, path, nil) if err != nil { return err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return err @@ -425,12 +412,18 @@ func (c *Client) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int) } func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64) (*RunnerScaleSetMessage, error) { - u := messageQueueUrl + u, err := url.Parse(messageQueueUrl) + if err != nil { + return nil, err + } + if lastMessageId > 0 { - u = fmt.Sprintf("%s&lassMessageId=%d", messageQueueUrl, lastMessageId) + q := u.Query() + q.Set("lastMessageId", strconv.FormatInt(lastMessageId, 10)) + u.RawQuery = q.Encode() } - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) if err != nil { return nil, err } @@ -466,7 +459,7 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc } var message *RunnerScaleSetMessage - err = unmarshalBody(resp, &message) + err = json.NewDecoder(resp.Body).Decode(&message) if err != nil { return nil, err } @@ -514,7 +507,7 @@ func (c *Client) DeleteMessage(ctx context.Context, messageQueueUrl, messageQueu } func (c *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*RunnerScaleSetSession, error) { - u := fmt.Sprintf("%v/%v/%v/sessions?%v", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId, apiVersionQueryParam) + path := fmt.Sprintf("/%s/%d/sessions", scaleSetEndpoint, runnerScaleSetId) newSession := &RunnerScaleSetSession{ OwnerName: owner, @@ -527,49 +520,36 @@ func (c *Client) CreateMessageSession(ctx context.Context, 
runnerScaleSetId int, createdSession := &RunnerScaleSetSession{} - err = c.doSessionRequest(ctx, http.MethodPost, u, bytes.NewBuffer(requestData), http.StatusOK, createdSession) + err = c.doSessionRequest(ctx, http.MethodPost, path, bytes.NewBuffer(requestData), http.StatusOK, createdSession) return createdSession, err } func (c *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error { - u := fmt.Sprintf("%v/%v/%v/sessions/%v?%v", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId, sessionId.String(), apiVersionQueryParam) - - return c.doSessionRequest(ctx, http.MethodDelete, u, nil, http.StatusNoContent, nil) + path := fmt.Sprintf("/%s/%d/sessions/%s", scaleSetEndpoint, runnerScaleSetId, sessionId.String()) + return c.doSessionRequest(ctx, http.MethodDelete, path, nil, http.StatusNoContent, nil) } func (c *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*RunnerScaleSetSession, error) { - u := fmt.Sprintf("%v/%v/%v/sessions/%v?%v", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId, sessionId.String(), apiVersionQueryParam) + path := fmt.Sprintf("/%s/%d/sessions/%s", scaleSetEndpoint, runnerScaleSetId, sessionId.String()) refreshedSession := &RunnerScaleSetSession{} - err := c.doSessionRequest(ctx, http.MethodPatch, u, nil, http.StatusOK, refreshedSession) + err := c.doSessionRequest(ctx, http.MethodPatch, path, nil, http.StatusOK, refreshedSession) return refreshedSession, err } -func (c *Client) doSessionRequest(ctx context.Context, method, url string, requestData io.Reader, expectedResponseStatusCode int, responseUnmarshalTarget any) error { - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return fmt.Errorf("failed to refresh admin token if needed: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, method, url, requestData) +func (c *Client) doSessionRequest(ctx context.Context, method, path string, requestData io.Reader, 
expectedResponseStatusCode int, responseUnmarshalTarget any) error { + req, err := c.NewActionsServiceRequest(ctx, method, path, requestData) if err != nil { return err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return err } if resp.StatusCode == expectedResponseStatusCode && responseUnmarshalTarget != nil { - err = unmarshalBody(resp, &responseUnmarshalTarget) - return err + return json.NewDecoder(resp.Body).Decode(responseUnmarshalTarget) } if resp.StatusCode >= 400 && resp.StatusCode < 500 { @@ -587,7 +567,7 @@ func (c *Client) doSessionRequest(ctx context.Context, method, url string, reque } func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) { - u := fmt.Sprintf("%s/%s/%d/acquirejobs?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) + u := fmt.Sprintf("%s/%s/%d/acquirejobs?api-version=6.0-preview", c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) body, err := json.Marshal(requestIds) if err != nil { @@ -614,8 +594,8 @@ func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQ return nil, ParseActionsErrorFromResponse(resp) } - var acquiredJobs Int64List - err = unmarshalBody(resp, &acquiredJobs) + var acquiredJobs *Int64List + err = json.NewDecoder(resp.Body).Decode(&acquiredJobs) if err != nil { return nil, err } @@ -624,24 +604,13 @@ func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQ } func (c *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*AcquirableJobList, error) { - u := fmt.Sprintf("%s/%s/%d/acquirablejobs?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, runnerScaleSetId) - - if err := c.refreshTokenIfNeeded(ctx); err 
!= nil { - return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) - } + path := fmt.Sprintf("/%s/%d/acquirablejobs", scaleSetEndpoint, runnerScaleSetId) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return nil, err @@ -657,7 +626,7 @@ func (c *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (* } var acquirableJobList *AcquirableJobList - err = unmarshalBody(resp, &acquirableJobList) + err = json.NewDecoder(resp.Body).Decode(&acquirableJobList) if err != nil { return nil, err } @@ -666,28 +635,18 @@ func (c *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (* } func (c *Client) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *RunnerScaleSetJitRunnerSetting, scaleSetId int) (*RunnerScaleSetJitRunnerConfig, error) { - runnerJitConfigUrl := fmt.Sprintf("%s/%s/%d/generatejitconfig?api-version=6.0-preview", *c.ActionsServiceURL, scaleSetEndpoint, scaleSetId) - - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) - } + path := fmt.Sprintf("/%s/%d/generatejitconfig", scaleSetEndpoint, scaleSetId) body, err := json.Marshal(jitRunnerSetting) if err != nil { return nil, err } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, runnerJitConfigUrl, bytes.NewBuffer(body)) + req, err := c.NewActionsServiceRequest(ctx, http.MethodPost, path, bytes.NewBuffer(body)) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - 
if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return nil, err @@ -698,7 +657,7 @@ func (c *Client) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting * } var runnerJitConfig *RunnerScaleSetJitRunnerConfig - err = unmarshalBody(resp, &runnerJitConfig) + err = json.NewDecoder(resp.Body).Decode(&runnerJitConfig) if err != nil { return nil, err } @@ -706,24 +665,13 @@ func (c *Client) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting * } func (c *Client) GetRunner(ctx context.Context, runnerId int64) (*RunnerReference, error) { - url := fmt.Sprintf("%v/%v/%v?%v", *c.ActionsServiceURL, runnerEndpoint, runnerId, apiVersionQueryParam) + path := fmt.Sprintf("/%s/%d", runnerEndpoint, runnerId) - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return nil, err @@ -734,7 +682,8 @@ func (c *Client) GetRunner(ctx context.Context, runnerId int64) (*RunnerReferenc } var runnerReference *RunnerReference - if err := unmarshalBody(resp, &runnerReference); err != nil { + err = json.NewDecoder(resp.Body).Decode(&runnerReference) + if err != nil { return nil, err } @@ -742,24 +691,13 @@ func (c *Client) GetRunner(ctx context.Context, runnerId int64) (*RunnerReferenc } func (c *Client) GetRunnerByName(ctx context.Context, runnerName string) (*RunnerReference, error) { - url := fmt.Sprintf("%v/%v?agentName=%v&%v", *c.ActionsServiceURL, runnerEndpoint, runnerName, apiVersionQueryParam) - 
- if err := c.refreshTokenIfNeeded(ctx); err != nil { - return nil, fmt.Errorf("failed to refresh admin token if needed: %w", err) - } + path := fmt.Sprintf("/%s?agentName=%s", runnerEndpoint, runnerName) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return nil, err @@ -770,7 +708,7 @@ func (c *Client) GetRunnerByName(ctx context.Context, runnerName string) (*Runne } var runnerList *RunnerReferenceList - err = unmarshalBody(resp, &runnerList) + err = json.NewDecoder(resp.Body).Decode(&runnerList) if err != nil { return nil, err } @@ -787,24 +725,13 @@ func (c *Client) GetRunnerByName(ctx context.Context, runnerName string) (*Runne } func (c *Client) RemoveRunner(ctx context.Context, runnerId int64) error { - url := fmt.Sprintf("%v/%v/%v?%v", *c.ActionsServiceURL, runnerEndpoint, runnerId, apiVersionQueryParam) - - if err := c.refreshTokenIfNeeded(ctx); err != nil { - return fmt.Errorf("failed to refresh admin token if needed: %w", err) - } + path := fmt.Sprintf("/%s/%d", runnerEndpoint, runnerId) - req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) + req, err := c.NewActionsServiceRequest(ctx, http.MethodDelete, path, nil) if err != nil { return err } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", *c.ActionsServiceAdminToken)) - - if c.userAgent != "" { - req.Header.Set("User-Agent", c.userAgent) - } - resp, err := c.Do(req) if err != nil { return err @@ -823,25 +750,25 @@ type registrationToken struct { ExpiresAt *time.Time `json:"expires_at,omitempty"` } -func (c *Client) 
getRunnerRegistrationToken(ctx context.Context, githubConfigUrl string, creds ActionsAuth) (*registrationToken, error) { - registrationTokenURL, err := createRegistrationTokenURL(githubConfigUrl) +func (c *Client) getRunnerRegistrationToken(ctx context.Context) (*registrationToken, error) { + path, err := createRegistrationTokenPath(c.config) if err != nil { return nil, err } var buf bytes.Buffer - req, err := http.NewRequestWithContext(ctx, http.MethodPost, registrationTokenURL, &buf) + req, err := c.NewGitHubAPIRequest(ctx, http.MethodPost, path, &buf) if err != nil { return nil, err } bearerToken := "" - if creds.Token != "" { - encodedToken := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("github:%v", creds.Token))) + if c.creds.Token != "" { + encodedToken := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("github:%v", c.creds.Token))) bearerToken = fmt.Sprintf("Basic %v", encodedToken) } else { - accessToken, err := c.fetchAccessToken(ctx, githubConfigUrl, creds.AppCreds) + accessToken, err := c.fetchAccessToken(ctx, c.config.ConfigURL.String(), c.creds.AppCreds) if err != nil { return nil, err } @@ -851,9 +778,8 @@ func (c *Client) getRunnerRegistrationToken(ctx context.Context, githubConfigUrl req.Header.Set("Content-Type", "application/vnd.github.v3+json") req.Header.Set("Authorization", bearerToken) - req.Header.Set("User-Agent", c.userAgent) - c.logger.Info("getting runner registration token", "registrationTokenURL", registrationTokenURL) + c.logger.Info("getting runner registration token", "registrationTokenURL", req.URL.String()) resp, err := c.Do(req) if err != nil { @@ -869,8 +795,8 @@ func (c *Client) getRunnerRegistrationToken(ctx context.Context, githubConfigUrl return nil, fmt.Errorf("unexpected response from Actions service during registration token call: %v - %v", resp.StatusCode, string(body)) } - registrationToken := ®istrationToken{} - if err := json.NewDecoder(resp.Body).Decode(registrationToken); err != nil { + var 
registrationToken *registrationToken + if err := json.NewDecoder(resp.Body).Decode(®istrationToken); err != nil { return nil, err } @@ -889,21 +815,16 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c return nil, err } - u, err := githubAPIURL(gitHubConfigURL, fmt.Sprintf("/app/installations/%v/access_tokens", creds.AppInstallationID)) - if err != nil { - return nil, err - } - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, nil) + path := fmt.Sprintf("/app/installations/%v/access_tokens", creds.AppInstallationID) + req, err := c.NewGitHubAPIRequest(ctx, http.MethodPost, path, nil) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/vnd.github+json") req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", accessTokenJWT)) - req.Header.Add("User-Agent", c.userAgent) - c.logger.Info("getting access token for GitHub App auth", "accessTokenURL", u) + c.logger.Info("getting access token for GitHub App auth", "accessTokenURL", req.URL.String()) resp, err := c.Do(req) if err != nil { @@ -912,8 +833,8 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c defer resp.Body.Close() // Format: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app - accessToken := &accessToken{} - err = json.NewDecoder(resp.Body).Decode(accessToken) + var accessToken *accessToken + err = json.NewDecoder(resp.Body).Decode(&accessToken) return accessToken, err } @@ -922,27 +843,14 @@ type ActionsServiceAdminConnection struct { AdminToken *string `json:"token,omitempty"` } -func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *registrationToken, githubConfigUrl string) (*ActionsServiceAdminConnection, error) { - parsedGitHubConfigURL, err := url.Parse(githubConfigUrl) - if err != nil { - return nil, err - } - - if isHostedServer(*parsedGitHubConfigURL) { - parsedGitHubConfigURL.Host = fmt.Sprintf("api.%v", parsedGitHubConfigURL.Host) - } 
- - ru := fmt.Sprintf("%v://%v/actions/runner-registration", parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host) - registrationURL, err := url.Parse(ru) - if err != nil { - return nil, err - } +func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *registrationToken) (*ActionsServiceAdminConnection, error) { + path := "/actions/runner-registration" body := struct { Url string `json:"url"` RunnerEvent string `json:"runner_event"` }{ - Url: githubConfigUrl, + Url: c.config.ConfigURL.String(), RunnerEvent: "register", } @@ -954,16 +862,15 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis return nil, err } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, registrationURL.String(), buf) + req, err := c.NewGitHubAPIRequest(ctx, http.MethodPost, path, buf) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") req.Header.Set("Authorization", fmt.Sprintf("RemoteAuth %s", *rt.Token)) - req.Header.Set("User-Agent", c.userAgent) - c.logger.Info("getting Actions tenant URL and JWT", "registrationURL", registrationURL.String()) + c.logger.Info("getting Actions tenant URL and JWT", "registrationURL", req.URL.String()) resp, err := c.Do(req) if err != nil { @@ -971,65 +878,30 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis } defer resp.Body.Close() - actionsServiceAdminConnection := &ActionsServiceAdminConnection{} - if err := json.NewDecoder(resp.Body).Decode(actionsServiceAdminConnection); err != nil { + var actionsServiceAdminConnection *ActionsServiceAdminConnection + if err := json.NewDecoder(resp.Body).Decode(&actionsServiceAdminConnection); err != nil { return nil, err } return actionsServiceAdminConnection, nil } -func isHostedServer(gitHubURL url.URL) bool { - return gitHubURL.Host == "github.com" || - gitHubURL.Host == "www.github.com" || - gitHubURL.Host == "github.localhost" -} - -func createRegistrationTokenURL(githubConfigUrl 
string) (string, error) { - parsedGitHubConfigURL, err := url.Parse(githubConfigUrl) - if err != nil { - return "", err - } - - // Check for empty path before split, because strings.Split will return a slice of length 1 - // when the split delimiter is not present. - trimmedPath := strings.TrimLeft(parsedGitHubConfigURL.Path, "/") - if len(trimmedPath) == 0 { - return "", fmt.Errorf("%q should point to an enterprise, org, or repository", parsedGitHubConfigURL.String()) - } - - pathParts := strings.Split(path.Clean(strings.TrimLeft(parsedGitHubConfigURL.Path, "/")), "/") - - switch len(pathParts) { - case 1: // Organization - registrationTokenURL := fmt.Sprintf( - "%v://%v/api/v3/orgs/%v/actions/runners/registration-token", - parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host, pathParts[0]) - - if isHostedServer(*parsedGitHubConfigURL) { - registrationTokenURL = fmt.Sprintf( - "%v://api.%v/orgs/%v/actions/runners/registration-token", - parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host, pathParts[0]) - } - - return registrationTokenURL, nil - case 2: // Repository or enterprise - repoScope := "repos/" - if strings.ToLower(pathParts[0]) == "enterprises" { - repoScope = "" - } +func createRegistrationTokenPath(config *GitHubConfig) (string, error) { + switch config.Scope { + case GitHubScopeOrganization: + path := fmt.Sprintf("/orgs/%s/actions/runners/registration-token", config.Organization) + return path, nil - registrationTokenURL := fmt.Sprintf("%v://%v/api/v3/%v%v/%v/actions/runners/registration-token", - parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host, repoScope, pathParts[0], pathParts[1]) + case GitHubScopeEnterprise: + path := fmt.Sprintf("/enterprises/%s/actions/runners/registration-token", config.Enterprise) + return path, nil - if isHostedServer(*parsedGitHubConfigURL) { - registrationTokenURL = fmt.Sprintf("%v://api.%v/%v%v/%v/actions/runners/registration-token", - parsedGitHubConfigURL.Scheme, parsedGitHubConfigURL.Host, repoScope, 
pathParts[0], pathParts[1]) - } + case GitHubScopeRepository: + path := fmt.Sprintf("/repos/%s/%s/actions/runners/registration-token", config.Organization, config.Repository) + return path, nil - return registrationTokenURL, nil default: - return "", fmt.Errorf("%q should point to an enterprise, org, or repository", parsedGitHubConfigURL.String()) + return "", fmt.Errorf("unknown scope for config url: %s", config.ConfigURL) } } @@ -1057,68 +929,50 @@ func createJWTForGitHubApp(appAuth *GitHubAppAuth) (string, error) { return token.SignedString(privateKey) } -func unmarshalBody(response *http.Response, v interface{}) (err error) { - if response != nil && response.Body != nil { - var err error - defer func() { - if closeError := response.Body.Close(); closeError != nil { - err = closeError - } - }() - body, err := io.ReadAll(response.Body) - if err != nil { - return err - } - body = trimByteOrderMark(body) - return json.Unmarshal(body, &v) - } - return nil -} - // Returns slice of body without utf-8 byte order mark. // If BOM does not exist body is returned unchanged. 
func trimByteOrderMark(body []byte) []byte { return bytes.TrimPrefix(body, []byte("\xef\xbb\xbf")) } -func actionsServiceAdminTokenExpiresAt(jwtToken string) (*time.Time, error) { +func actionsServiceAdminTokenExpiresAt(jwtToken string) (time.Time, error) { type JwtClaims struct { jwt.RegisteredClaims } token, _, err := jwt.NewParser().ParseUnverified(jwtToken, &JwtClaims{}) if err != nil { - return nil, fmt.Errorf("failed to parse jwt token: %w", err) + return time.Time{}, fmt.Errorf("failed to parse jwt token: %w", err) } if claims, ok := token.Claims.(*JwtClaims); ok { - return &claims.ExpiresAt.Time, nil + return claims.ExpiresAt.Time, nil } - return nil, fmt.Errorf("failed to parse token claims to get expire at") + return time.Time{}, fmt.Errorf("failed to parse token claims to get expire at") } -func (c *Client) refreshTokenIfNeeded(ctx context.Context) error { +func (c *Client) updateTokenIfNeeded(ctx context.Context) error { c.mu.Lock() defer c.mu.Unlock() - aboutToExpire := time.Now().Add(60 * time.Second).After(*c.ActionsServiceAdminTokenExpiresAt) - if !aboutToExpire { + aboutToExpire := time.Now().Add(60 * time.Second).After(c.ActionsServiceAdminTokenExpiresAt) + if !aboutToExpire && !c.ActionsServiceAdminTokenExpiresAt.IsZero() { return nil } - c.logger.Info("Admin token is about to expire, refreshing it", "githubConfigUrl", c.githubConfigURL) - rt, err := c.getRunnerRegistrationToken(ctx, c.githubConfigURL, *c.creds) + c.logger.Info("refreshing token", "githubConfigUrl", c.config.ConfigURL.String()) + rt, err := c.getRunnerRegistrationToken(ctx) if err != nil { - return fmt.Errorf("failed to get runner registration token on fresh: %w", err) + return fmt.Errorf("failed to get runner registration token on refresh: %w", err) } - adminConnInfo, err := c.getActionsServiceAdminConnection(ctx, rt, c.githubConfigURL) + adminConnInfo, err := c.getActionsServiceAdminConnection(ctx, rt) if err != nil { - return fmt.Errorf("failed to get actions service admin 
connection on fresh: %w", err) + return fmt.Errorf("failed to get actions service admin connection on refresh: %w", err) } - c.ActionsServiceURL = adminConnInfo.ActionsServiceUrl - c.ActionsServiceAdminToken = adminConnInfo.AdminToken + c.ActionsServiceURL = *adminConnInfo.ActionsServiceUrl + c.ActionsServiceAdminToken = *adminConnInfo.AdminToken c.ActionsServiceAdminTokenExpiresAt, err = actionsServiceAdminTokenExpiresAt(*adminConnInfo.AdminToken) if err != nil { return fmt.Errorf("failed to get admin token expire at on refresh: %w", err) @@ -1126,32 +980,3 @@ func (c *Client) refreshTokenIfNeeded(ctx context.Context) error { return nil } - -func githubAPIURL(configURL, path string) (string, error) { - u, err := url.Parse(configURL) - if err != nil { - return "", err - } - - result := &url.URL{ - Scheme: u.Scheme, - } - - switch u.Host { - // Hosted - case "github.com", "github.localhost": - result.Host = fmt.Sprintf("api.%s", u.Host) - // re-routing www.github.com to api.github.com - case "www.github.com": - result.Host = "api.github.com" - - // Enterprise - default: - result.Host = u.Host - result.Path = "/api/v3" - } - - result.Path += path - - return result.String(), nil -} diff --git a/github/actions/client_generate_jit_test.go b/github/actions/client_generate_jit_test.go index cf594151b9..94f9d53746 100644 --- a/github/actions/client_generate_jit_test.go +++ b/github/actions/client_generate_jit_test.go @@ -26,7 +26,7 @@ func TestGenerateJitRunnerConfig(t *testing.T) { server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write(response) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GenerateJitRunnerConfig(ctx, runnerSettings, 1) @@ -47,7 +47,6 @@ func TestGenerateJitRunnerConfig(t *testing.T) { })) client, err := actions.NewClient( - ctx, 
server.configURLForOrg("my-org"), auth, actions.WithRetryMax(1), diff --git a/github/actions/client_job_acquisition_test.go b/github/actions/client_job_acquisition_test.go index dfd0d58dad..e11b7ba266 100644 --- a/github/actions/client_job_acquisition_test.go +++ b/github/actions/client_job_acquisition_test.go @@ -3,6 +3,7 @@ package actions_test import ( "context" "net/http" + "strings" "testing" "time" @@ -27,11 +28,19 @@ func TestAcquireJobs(t *testing.T) { } requestIDs := want - server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasSuffix(r.URL.Path, "/acquirablejobs") { + w.Write([]byte(`{"count": 1}`)) + return + } + w.Write(response) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetAcquirableJobs(ctx, 1) require.NoError(t, err) got, err := client.AcquireJobs(ctx, session.RunnerScaleSet.Id, session.MessageQueueAccessToken, requestIDs) @@ -50,13 +59,17 @@ func TestAcquireJobs(t *testing.T) { actualRetry := 0 expectedRetry := retryMax + 1 - server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasSuffix(r.URL.Path, "/acquirablejobs") { + w.Write([]byte(`{"count": 1}`)) + return + } + w.WriteHeader(http.StatusServiceUnavailable) actualRetry++ })) client, err := actions.NewClient( - ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), @@ -64,6 +77,9 @@ func TestAcquireJobs(t *testing.T) { ) require.NoError(t, err) + _, err = client.GetAcquirableJobs(ctx, 1) + require.NoError(t, err) + _, err = client.AcquireJobs(context.Background(), session.RunnerScaleSet.Id, 
session.MessageQueueAccessToken, requestIDs) assert.NotNil(t, err) assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) @@ -71,7 +87,6 @@ func TestAcquireJobs(t *testing.T) { } func TestGetAcquirableJobs(t *testing.T) { - ctx := context.Background() auth := &actions.ActionsAuth{ Token: "token", } @@ -86,7 +101,7 @@ func TestGetAcquirableJobs(t *testing.T) { w.Write(response) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetAcquirableJobs(context.Background(), runnerScaleSet.Id) @@ -108,7 +123,6 @@ func TestGetAcquirableJobs(t *testing.T) { })) client, err := actions.NewClient( - context.Background(), server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), diff --git a/github/actions/client_runner_scale_set_message_test.go b/github/actions/client_runner_scale_set_message_test.go index 55e80267a4..0de7709414 100644 --- a/github/actions/client_runner_scale_set_message_test.go +++ b/github/actions/client_runner_scale_set_message_test.go @@ -32,7 +32,7 @@ func TestGetMessage(t *testing.T) { w.Write(response) })) - client, err := actions.NewClient(ctx, s.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(s.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetMessage(ctx, s.URL, token, 0) @@ -40,6 +40,23 @@ func TestGetMessage(t *testing.T) { assert.Equal(t, want, got) }) + t.Run("GetMessage sets the last message id if not 0", func(t *testing.T) { + want := runnerScaleSetMessage + response := []byte(`{"messageId":1,"messageType":"rssType"}`) + s := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query() + assert.Equal(t, "1", q.Get("lastMessageId")) + w.Write(response) + })) + + client, err := actions.NewClient(s.configURLForOrg("my-org"), auth) + 
require.NoError(t, err) + + got, err := client.GetMessage(ctx, s.URL, token, 1) + require.NoError(t, err) + assert.Equal(t, want, got) + }) + t.Run("Default retries on server error", func(t *testing.T) { retryMax := 1 @@ -52,7 +69,6 @@ func TestGetMessage(t *testing.T) { })) client, err := actions.NewClient( - ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), @@ -70,7 +86,7 @@ func TestGetMessage(t *testing.T) { w.WriteHeader(http.StatusUnauthorized) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.GetMessage(ctx, server.URL, token, 0) @@ -78,8 +94,7 @@ func TestGetMessage(t *testing.T) { var expectedErr *actions.MessageQueueTokenExpiredError require.True(t, errors.As(err, &expectedErr)) - }, - ) + }) t.Run("Status code not found", func(t *testing.T) { want := actions.ActionsError{ @@ -90,7 +105,7 @@ func TestGetMessage(t *testing.T) { w.WriteHeader(http.StatusNotFound) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.GetMessage(ctx, server.URL, token, 0) @@ -104,7 +119,7 @@ func TestGetMessage(t *testing.T) { w.Header().Set("Content-Type", "text/plain") })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.GetMessage(ctx, server.URL, token, 0) @@ -129,7 +144,7 @@ func TestDeleteMessage(t *testing.T) { w.WriteHeader(http.StatusNoContent) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) err = client.DeleteMessage(ctx, server.URL, token, runnerScaleSetMessage.MessageId) @@ 
-141,7 +156,7 @@ func TestDeleteMessage(t *testing.T) { w.WriteHeader(http.StatusUnauthorized) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) err = client.DeleteMessage(ctx, server.URL, token, 0) @@ -156,7 +171,7 @@ func TestDeleteMessage(t *testing.T) { w.Header().Set("Content-Type", "text/plain") })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) err = client.DeleteMessage(ctx, server.URL, token, runnerScaleSetMessage.MessageId) @@ -175,7 +190,6 @@ func TestDeleteMessage(t *testing.T) { retryMax := 1 client, err := actions.NewClient( - ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), @@ -197,7 +211,7 @@ func TestDeleteMessage(t *testing.T) { w.Write(rsl) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) err = client.DeleteMessage(ctx, server.URL, token, runnerScaleSetMessage.MessageId+1) diff --git a/github/actions/client_runner_scale_set_session_test.go b/github/actions/client_runner_scale_set_session_test.go index f5fbceb76b..7b2ab69d52 100644 --- a/github/actions/client_runner_scale_set_session_test.go +++ b/github/actions/client_runner_scale_set_session_test.go @@ -51,7 +51,7 @@ func TestCreateMessageSession(t *testing.T) { w.Write(resp) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.CreateMessageSession(ctx, runnerScaleSet.Id, owner) @@ -81,7 +81,7 @@ func TestCreateMessageSession(t *testing.T) { w.Write(resp) })) - client, err := actions.NewClient(ctx, 
server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.CreateMessageSession(ctx, runnerScaleSet.Id, owner) @@ -120,7 +120,6 @@ func TestCreateMessageSession(t *testing.T) { wantRetries := retryMax + 1 client, err := actions.NewClient( - ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), @@ -160,7 +159,6 @@ func TestDeleteMessageSession(t *testing.T) { wantRetries := retryMax + 1 client, err := actions.NewClient( - ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), @@ -177,7 +175,6 @@ func TestDeleteMessageSession(t *testing.T) { } func TestRefreshMessageSession(t *testing.T) { - ctx := context.Background() auth := &actions.ActionsAuth{ Token: "token", } @@ -202,7 +199,6 @@ func TestRefreshMessageSession(t *testing.T) { wantRetries := retryMax + 1 client, err := actions.NewClient( - ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), diff --git a/github/actions/client_runner_scale_set_test.go b/github/actions/client_runner_scale_set_test.go index 980b9846d3..5354a0e584 100644 --- a/github/actions/client_runner_scale_set_test.go +++ b/github/actions/client_runner_scale_set_test.go @@ -31,7 +31,7 @@ func TestGetRunnerScaleSet(t *testing.T) { w.Write(runnerScaleSetsResp) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetRunnerScaleSet(ctx, scaleSetName) @@ -47,15 +47,16 @@ func TestGetRunnerScaleSet(t *testing.T) { url = *r.URL })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.GetRunnerScaleSet(ctx, scaleSetName) require.NoError(t, err) - u := url.String() - expectedUrl := 
fmt.Sprintf("/_apis/runtime/runnerscalesets?name=%s&api-version=6.0-preview", scaleSetName) - assert.Equal(t, expectedUrl, u) + expectedPath := "/tenant/123/_apis/runtime/runnerscalesets" + assert.Equal(t, expectedPath, url.Path) + assert.Equal(t, scaleSetName, url.Query().Get("name")) + assert.Equal(t, "6.0-preview", url.Query().Get("api-version")) }) t.Run("Status code not found", func(t *testing.T) { @@ -63,7 +64,7 @@ func TestGetRunnerScaleSet(t *testing.T) { w.WriteHeader(http.StatusNotFound) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.GetRunnerScaleSet(ctx, scaleSetName) @@ -76,7 +77,7 @@ func TestGetRunnerScaleSet(t *testing.T) { w.Header().Set("Content-Type", "text/plain") })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.GetRunnerScaleSet(ctx, scaleSetName) @@ -94,7 +95,6 @@ func TestGetRunnerScaleSet(t *testing.T) { retryWaitMax := 1 * time.Microsecond client, err := actions.NewClient( - ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), @@ -115,7 +115,7 @@ func TestGetRunnerScaleSet(t *testing.T) { w.Write(runnerScaleSetsResp) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetRunnerScaleSet(ctx, scaleSetName) @@ -130,7 +130,7 @@ func TestGetRunnerScaleSet(t *testing.T) { w.Write(runnerScaleSetsResp) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.GetRunnerScaleSet(ctx, scaleSetName) @@ -156,7 +156,7 @@ func 
TestGetRunnerScaleSetById(t *testing.T) { w.Write(rsl) })) - client, err := actions.NewClient(ctx, sservere.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(sservere.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) @@ -174,15 +174,15 @@ func TestGetRunnerScaleSetById(t *testing.T) { url = *r.URL })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) require.NoError(t, err) - u := url.String() - expectedUrl := fmt.Sprintf("/_apis/runtime/runnerscalesets/%d?api-version=6.0-preview", runnerScaleSet.Id) - assert.Equal(t, expectedUrl, u) + expectedPath := fmt.Sprintf("/tenant/123/_apis/runtime/runnerscalesets/%d", runnerScaleSet.Id) + assert.Equal(t, expectedPath, url.Path) + assert.Equal(t, "6.0-preview", url.Query().Get("api-version")) }) t.Run("Status code not found", func(t *testing.T) { @@ -190,7 +190,7 @@ func TestGetRunnerScaleSetById(t *testing.T) { w.WriteHeader(http.StatusNotFound) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) @@ -203,7 +203,7 @@ func TestGetRunnerScaleSetById(t *testing.T) { w.Header().Set("Content-Type", "text/plain") })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) @@ -220,7 +220,6 @@ func TestGetRunnerScaleSetById(t *testing.T) { retryMax := 1 retryWaitMax := 1 * time.Microsecond client, err := actions.NewClient( - ctx, server.configURLForOrg("my-org"), auth, 
actions.WithRetryMax(retryMax), @@ -242,7 +241,7 @@ func TestGetRunnerScaleSetById(t *testing.T) { w.Write(rsl) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetRunnerScaleSetById(ctx, runnerScaleSet.Id) @@ -268,7 +267,7 @@ func TestCreateRunnerScaleSet(t *testing.T) { w.Write(rsl) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.CreateRunnerScaleSet(ctx, &runnerScaleSet) @@ -285,15 +284,15 @@ func TestCreateRunnerScaleSet(t *testing.T) { url = *r.URL })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.CreateRunnerScaleSet(ctx, &runnerScaleSet) require.NoError(t, err) - u := url.String() - expectedUrl := "/_apis/runtime/runnerscalesets?api-version=6.0-preview" - assert.Equal(t, expectedUrl, u) + expectedPath := "/tenant/123/_apis/runtime/runnerscalesets" + assert.Equal(t, expectedPath, url.Path) + assert.Equal(t, "6.0-preview", url.Query().Get("api-version")) }) t.Run("Error when Content-Type is text/plain", func(t *testing.T) { @@ -302,7 +301,7 @@ func TestCreateRunnerScaleSet(t *testing.T) { w.Header().Set("Content-Type", "text/plain") })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.CreateRunnerScaleSet(ctx, &runnerScaleSet) @@ -322,7 +321,6 @@ func TestCreateRunnerScaleSet(t *testing.T) { retryWaitMax := 1 * time.Microsecond client, err := actions.NewClient( - ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), @@ -354,7 +352,7 @@ func 
TestUpdateRunnerScaleSet(t *testing.T) { w.Write(rsl) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.UpdateRunnerScaleSet(ctx, 1, &actions.RunnerScaleSet{RunnerGroupId: 1}) @@ -365,24 +363,19 @@ func TestUpdateRunnerScaleSet(t *testing.T) { t.Run("UpdateRunnerScaleSet calls correct url", func(t *testing.T) { rsl, err := json.Marshal(&runnerScaleSet) require.NoError(t, err) - url := url.URL{} - method := "" server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + expectedPath := "/tenant/123/_apis/runtime/runnerscalesets/1" + assert.Equal(t, expectedPath, r.URL.Path) + assert.Equal(t, http.MethodPatch, r.Method) + assert.Equal(t, "6.0-preview", r.URL.Query().Get("api-version")) + w.Write(rsl) - url = *r.URL - method = r.Method })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) _, err = client.UpdateRunnerScaleSet(ctx, 1, &runnerScaleSet) require.NoError(t, err) - - u := url.String() - expectedUrl := "/_apis/runtime/runnerscalesets/1?api-version=6.0-preview" - assert.Equal(t, expectedUrl, u) - - assert.Equal(t, "PATCH", method) }) } diff --git a/github/actions/client_runner_test.go b/github/actions/client_runner_test.go index 38d7b29825..1ad4947e79 100644 --- a/github/actions/client_runner_test.go +++ b/github/actions/client_runner_test.go @@ -29,7 +29,7 @@ func TestGetRunner(t *testing.T) { w.Write(response) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetRunner(ctx, runnerID) @@ -50,7 +50,7 @@ func TestGetRunner(t *testing.T) { actualRetry++ })) - client, err := actions.NewClient(ctx, 
server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), actions.WithRetryWaitMax(retryWaitMax)) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), actions.WithRetryWaitMax(retryWaitMax)) require.NoError(t, err) _, err = client.GetRunner(ctx, runnerID) @@ -78,7 +78,7 @@ func TestGetRunnerByName(t *testing.T) { w.Write(response) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetRunnerByName(ctx, runnerName) @@ -94,7 +94,7 @@ func TestGetRunnerByName(t *testing.T) { w.Write(response) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetRunnerByName(ctx, runnerName) @@ -116,7 +116,7 @@ func TestGetRunnerByName(t *testing.T) { actualRetry++ })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), actions.WithRetryWaitMax(retryWaitMax)) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), actions.WithRetryWaitMax(retryWaitMax)) require.NoError(t, err) _, err = client.GetRunnerByName(ctx, runnerName) @@ -138,7 +138,7 @@ func TestDeleteRunner(t *testing.T) { w.WriteHeader(http.StatusNoContent) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) err = client.RemoveRunner(ctx, runnerID) @@ -160,7 +160,6 @@ func TestDeleteRunner(t *testing.T) { })) client, err := actions.NewClient( - ctx, server.configURLForOrg("my-org"), auth, actions.WithRetryMax(retryMax), @@ -193,7 +192,7 @@ func TestGetRunnerGroupByName(t *testing.T) { w.Write(response) })) - client, err := 
actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetRunnerGroupByName(ctx, runnerGroupName) @@ -209,7 +208,7 @@ func TestGetRunnerGroupByName(t *testing.T) { w.Write(response) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) got, err := client.GetRunnerGroupByName(ctx, runnerGroupName) diff --git a/github/actions/client_tls_test.go b/github/actions/client_tls_test.go index 320798b8b3..5e7190b57c 100644 --- a/github/actions/client_tls_test.go +++ b/github/actions/client_tls_test.go @@ -22,9 +22,9 @@ import ( func TestServerWithSelfSignedCertificates(t *testing.T) { ctx := context.Background() - // this handler is a very very barebones replica of actions api // used during the creation of a a new client + var u string h := func(w http.ResponseWriter, r *http.Request) { // handle get registration token if strings.HasSuffix(r.URL.Path, "/runners/registration-token") { @@ -46,9 +46,12 @@ func TestServerWithSelfSignedCertificates(t *testing.T) { require.NoError(t, err) tokenString, err := token.SignedString(privateKey) require.NoError(t, err) - w.Write([]byte(`{"url":"TODO","token":"` + tokenString + `"}`)) + w.Write([]byte(`{"url":"` + u + `","token":"` + tokenString + `"}`)) return } + + // default happy response for RemoveRunner + w.WriteHeader(http.StatusNoContent) } certPath := filepath.Join("testdata", "server.crt") @@ -56,13 +59,17 @@ func TestServerWithSelfSignedCertificates(t *testing.T) { t.Run("client without ca certs", func(t *testing.T) { server := startNewTLSTestServer(t, certPath, keyPath, http.HandlerFunc(h)) + u = server.URL configURL := server.URL + "/my-org" auth := &actions.ActionsAuth{ Token: "token", } - client, err := actions.NewClient(ctx, configURL, auth) - assert.Nil(t, client) + 
client, err := actions.NewClient(configURL, auth) + require.NoError(t, err) + require.NotNil(t, client) + + err = client.RemoveRunner(ctx, 1) require.NotNil(t, err) if runtime.GOOS == "linux" { @@ -78,6 +85,7 @@ func TestServerWithSelfSignedCertificates(t *testing.T) { t.Run("client with ca certs", func(t *testing.T) { server := startNewTLSTestServer(t, certPath, keyPath, http.HandlerFunc(h)) + u = server.URL configURL := server.URL + "/my-org" auth := &actions.ActionsAuth{ @@ -90,9 +98,12 @@ func TestServerWithSelfSignedCertificates(t *testing.T) { pool, err := actions.RootCAsFromConfigMap(map[string][]byte{"cert": cert}) require.NoError(t, err) - client, err := actions.NewClient(ctx, configURL, auth, actions.WithRootCAs(pool)) + client, err := actions.NewClient(configURL, auth, actions.WithRootCAs(pool)) require.NoError(t, err) assert.NotNil(t, client) + + err = client.RemoveRunner(ctx, 1) + assert.NoError(t, err) }) t.Run("client with ca chain certs", func(t *testing.T) { @@ -102,6 +113,7 @@ func TestServerWithSelfSignedCertificates(t *testing.T) { filepath.Join("testdata", "leaf.key"), http.HandlerFunc(h), ) + u = server.URL configURL := server.URL + "/my-org" auth := &actions.ActionsAuth{ @@ -114,9 +126,12 @@ func TestServerWithSelfSignedCertificates(t *testing.T) { pool, err := actions.RootCAsFromConfigMap(map[string][]byte{"cert": cert}) require.NoError(t, err) - client, err := actions.NewClient(ctx, configURL, auth, actions.WithRootCAs(pool), actions.WithRetryMax(0)) + client, err := actions.NewClient(configURL, auth, actions.WithRootCAs(pool), actions.WithRetryMax(0)) require.NoError(t, err) - assert.NotNil(t, client) + require.NotNil(t, client) + + err = client.RemoveRunner(ctx, 1) + assert.NoError(t, err) }) t.Run("client skipping tls verification", func(t *testing.T) { @@ -127,7 +142,7 @@ func TestServerWithSelfSignedCertificates(t *testing.T) { Token: "token", } - client, err := actions.NewClient(ctx, configURL, auth, actions.WithoutTLSVerify()) + 
client, err := actions.NewClient(configURL, auth, actions.WithoutTLSVerify()) require.NoError(t, err) assert.NotNil(t, client) }) diff --git a/github/actions/config.go b/github/actions/config.go new file mode 100644 index 0000000000..204fa0a4a2 --- /dev/null +++ b/github/actions/config.go @@ -0,0 +1,98 @@ +package actions + +import ( + "fmt" + "net/url" + "strings" +) + +var ErrInvalidGitHubConfigURL = fmt.Errorf("invalid config URL, should point to an enterprise, org, or repository") + +type GitHubScope int + +const ( + GitHubScopeUnknown GitHubScope = iota + GitHubScopeEnterprise + GitHubScopeOrganization + GitHubScopeRepository +) + +type GitHubConfig struct { + ConfigURL *url.URL + Scope GitHubScope + + Enterprise string + Organization string + Repository string + + IsHosted bool +} + +func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) { + u, err := url.Parse(in) + if err != nil { + return nil, err + } + + isHosted := u.Host == "github.com" || + u.Host == "www.github.com" || + u.Host == "github.localhost" + + configURL := &GitHubConfig{ + ConfigURL: u, + IsHosted: isHosted, + } + + invalidURLError := fmt.Errorf("%q: %w", u.String(), ErrInvalidGitHubConfigURL) + + pathParts := strings.Split(strings.TrimPrefix(u.Path, "/"), "/") + + switch len(pathParts) { + case 1: // Organization + if pathParts[0] == "" { + return nil, invalidURLError + } + + configURL.Scope = GitHubScopeOrganization + configURL.Organization = pathParts[0] + + case 2: // Repository or enterprise + if strings.ToLower(pathParts[0]) == "enterprises" { + configURL.Scope = GitHubScopeEnterprise + configURL.Enterprise = pathParts[1] + break + } + + configURL.Scope = GitHubScopeRepository + configURL.Organization = pathParts[0] + configURL.Repository = pathParts[1] + default: + return nil, invalidURLError + } + + return configURL, nil +} + +func (c *GitHubConfig) GitHubAPIURL(path string) *url.URL { + result := &url.URL{ + Scheme: c.ConfigURL.Scheme, + } + + switch c.ConfigURL.Host { + // 
Hosted + case "github.com", "github.localhost": + result.Host = fmt.Sprintf("api.%s", c.ConfigURL.Host) + // re-routing www.github.com to api.github.com + case "www.github.com": + result.Host = "api.github.com" + + // Enterprise + default: + result.Host = c.ConfigURL.Host + result.Path = "/api/v3" + } + + result.Path += path + + return result +} diff --git a/github/actions/config_test.go b/github/actions/config_test.go new file mode 100644 index 0000000000..a9a8368fcb --- /dev/null +++ b/github/actions/config_test.go @@ -0,0 +1,117 @@ +package actions_test + +import ( + "errors" + "net/url" + "testing" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGitHubConfig(t *testing.T) { + t.Run("when given a valid URL", func(t *testing.T) { + tests := []struct { + configURL string + expected *actions.GitHubConfig + }{ + { + configURL: "https://github.com/org/repo", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeRepository, + Enterprise: "", + Organization: "org", + Repository: "repo", + IsHosted: true, + }, + }, + { + configURL: "https://github.com/org", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeOrganization, + Enterprise: "", + Organization: "org", + Repository: "", + IsHosted: true, + }, + }, + { + configURL: "https://github.com/enterprises/my-enterprise", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeEnterprise, + Enterprise: "my-enterprise", + Organization: "", + Repository: "", + IsHosted: true, + }, + }, + { + configURL: "https://www.github.com/org", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeOrganization, + Enterprise: "", + Organization: "org", + Repository: "", + IsHosted: true, + }, + }, + { + configURL: "https://github.localhost/org", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeOrganization, + Enterprise: "", + Organization: "org", + Repository: "", + IsHosted: 
true, + }, + }, + { + configURL: "https://my-ghes.com/org", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeOrganization, + Enterprise: "", + Organization: "org", + Repository: "", + IsHosted: false, + }, + }, + } + + for _, test := range tests { + t.Run(test.configURL, func(t *testing.T) { + parsedURL, err := url.Parse(test.configURL) + require.NoError(t, err) + test.expected.ConfigURL = parsedURL + + cfg, err := actions.ParseGitHubConfigFromURL(test.configURL) + require.NoError(t, err) + assert.Equal(t, test.expected, cfg) + }) + } + }) + + t.Run("when given an invalid URL", func(t *testing.T) {}) + invalidURLs := []string{ + "https://github.com/", + "https://github.com", + "https://github.com/some/random/path", + } + + for _, u := range invalidURLs { + _, err := actions.ParseGitHubConfigFromURL(u) + require.Error(t, err) + assert.True(t, errors.Is(err, actions.ErrInvalidGitHubConfigURL)) + } +} + +func TestGitHubConfig_GitHubAPIURL(t *testing.T) { + t.Run("when hosted", func(t *testing.T) { + config, err := actions.ParseGitHubConfigFromURL("https://github.com/org/repo") + require.NoError(t, err) + + result := config.GitHubAPIURL("/some/path") + assert.Equal(t, "https://api.github.com/some/path", result.String()) + }) + t.Run("when not hosted", func(t *testing.T) {}) +} diff --git a/github/actions/github_api_request_test.go b/github/actions/github_api_request_test.go new file mode 100644 index 0000000000..3a378149ef --- /dev/null +++ b/github/actions/github_api_request_test.go @@ -0,0 +1,171 @@ +package actions_test + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" + "testing" + "time" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewGitHubAPIRequest(t *testing.T) { + ctx := context.Background() + + t.Run("uses the right host/path prefix", func(t *testing.T) { + scenarios := []struct { + configURL string + path string 
+ expected string + }{ + { + configURL: "https://github.com/org/repo", + path: "/app/installations/123/access_tokens", + expected: "https://api.github.com/app/installations/123/access_tokens", + }, + { + configURL: "https://www.github.com/org/repo", + path: "/app/installations/123/access_tokens", + expected: "https://api.github.com/app/installations/123/access_tokens", + }, + { + configURL: "http://github.localhost/org/repo", + path: "/app/installations/123/access_tokens", + expected: "http://api.github.localhost/app/installations/123/access_tokens", + }, + { + configURL: "https://my-instance.com/org/repo", + path: "/app/installations/123/access_tokens", + expected: "https://my-instance.com/api/v3/app/installations/123/access_tokens", + }, + { + configURL: "http://localhost/org/repo", + path: "/app/installations/123/access_tokens", + expected: "http://localhost/api/v3/app/installations/123/access_tokens", + }, + } + + for _, scenario := range scenarios { + client, err := actions.NewClient(scenario.configURL, nil) + require.NoError(t, err) + + req, err := client.NewGitHubAPIRequest(ctx, http.MethodGet, scenario.path, nil) + require.NoError(t, err) + assert.Equal(t, scenario.expected, req.URL.String()) + } + }) + + t.Run("sets user agent header if present", func(t *testing.T) { + client, err := actions.NewClient("http://localhost/my-org", nil, actions.WithUserAgent("my-agent")) + require.NoError(t, err) + + req, err := client.NewGitHubAPIRequest(ctx, http.MethodGet, "/app/installations/123/access_tokens", nil) + require.NoError(t, err) + + assert.Equal(t, "my-agent", req.Header.Get("User-Agent")) + }) + + t.Run("sets the body we pass", func(t *testing.T) { + client, err := actions.NewClient("http://localhost/my-org", nil) + require.NoError(t, err) + + req, err := client.NewGitHubAPIRequest( + ctx, + http.MethodGet, + "/app/installations/123/access_tokens", + strings.NewReader("the-body"), + ) + require.NoError(t, err) + + b, err := io.ReadAll(req.Body) + 
require.NoError(t, err) + assert.Equal(t, "the-body", string(b)) + }) +} + +func TestNewActionsServiceRequest(t *testing.T) { + ctx := context.Background() + defaultCreds := &actions.ActionsAuth{Token: "token"} + + t.Run("manages authentication", func(t *testing.T) { + t.Run("client is brand new", func(t *testing.T) { + token := defaultActionsToken(t) + server := newActionsServer(t, nil, withActionsToken(token)) + + client, err := actions.NewClient(server.configURLForOrg("my-org"), defaultCreds) + require.NoError(t, err) + + req, err := client.NewActionsServiceRequest(ctx, http.MethodGet, "my-path", nil) + require.NoError(t, err) + + assert.Equal(t, "Bearer "+token, req.Header.Get("Authorization")) + }) + + t.Run("admin token is about to expire", func(t *testing.T) { + newToken := defaultActionsToken(t) + server := newActionsServer(t, nil, withActionsToken(newToken)) + + client, err := actions.NewClient(server.configURLForOrg("my-org"), defaultCreds) + require.NoError(t, err) + client.ActionsServiceAdminToken = "expiring-token" + client.ActionsServiceAdminTokenExpiresAt = time.Now().Add(59 * time.Second) + + req, err := client.NewActionsServiceRequest(ctx, http.MethodGet, "my-path", nil) + require.NoError(t, err) + + assert.Equal(t, "Bearer "+newToken, req.Header.Get("Authorization")) + }) + + t.Run("token is currently valid", func(t *testing.T) { + tokenThatShouldNotBeFetched := defaultActionsToken(t) + server := newActionsServer(t, nil, withActionsToken(tokenThatShouldNotBeFetched)) + + client, err := actions.NewClient(server.configURLForOrg("my-org"), defaultCreds) + require.NoError(t, err) + client.ActionsServiceAdminToken = "healthy-token" + client.ActionsServiceAdminTokenExpiresAt = time.Now().Add(1 * time.Hour) + + req, err := client.NewActionsServiceRequest(ctx, http.MethodGet, "my-path", nil) + require.NoError(t, err) + + assert.Equal(t, "Bearer healthy-token", req.Header.Get("Authorization")) + }) + }) + + t.Run("builds the right URL including api 
version", func(t *testing.T) { + server := newActionsServer(t, nil) + + client, err := actions.NewClient(server.configURLForOrg("my-org"), defaultCreds) + require.NoError(t, err) + + req, err := client.NewActionsServiceRequest(ctx, http.MethodGet, "/my/path?name=banana", nil) + require.NoError(t, err) + + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + result := req.URL + assert.Equal(t, serverURL.Host, result.Host) + assert.Equal(t, "/tenant/123/my/path", result.Path) + assert.Equal(t, "banana", result.Query().Get("name")) + assert.Equal(t, "6.0-preview", result.Query().Get("api-version")) + }) + + t.Run("populates header", func(t *testing.T) { + server := newActionsServer(t, nil) + + client, err := actions.NewClient(server.configURLForOrg("my-org"), defaultCreds, actions.WithUserAgent("my-agent")) + require.NoError(t, err) + + req, err := client.NewActionsServiceRequest(ctx, http.MethodGet, "/my/path", nil) + require.NoError(t, err) + + assert.Equal(t, "my-agent", req.Header.Get("User-Agent")) + assert.Equal(t, "application/json", req.Header.Get("Content-Type")) + }) +} diff --git a/github/actions/multi_client.go b/github/actions/multi_client.go index 85e0fa7591..b875c8723d 100644 --- a/github/actions/multi_client.go +++ b/github/actions/multi_client.go @@ -106,7 +106,6 @@ func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, m.logger.Info("creating new client", "githubConfigURL", githubConfigURL, "namespace", namespace) client, err := NewClient( - ctx, githubConfigURL, &creds, WithUserAgent(m.userAgent), diff --git a/github/actions/url_test.go b/github/actions/url_test.go deleted file mode 100644 index ae296a30e1..0000000000 --- a/github/actions/url_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package actions - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestGithubAPIURL(t *testing.T) { - tests := []struct { - configURL string - path string - expected 
string - }{ - { - configURL: "https://github.com/org/repo", - path: "/app/installations/123/access_tokens", - expected: "https://api.github.com/app/installations/123/access_tokens", - }, - { - configURL: "https://www.github.com/org/repo", - path: "/app/installations/123/access_tokens", - expected: "https://api.github.com/app/installations/123/access_tokens", - }, - { - configURL: "http://github.localhost/org/repo", - path: "/app/installations/123/access_tokens", - expected: "http://api.github.localhost/app/installations/123/access_tokens", - }, - { - configURL: "https://my-instance.com/org/repo", - path: "/app/installations/123/access_tokens", - expected: "https://my-instance.com/api/v3/app/installations/123/access_tokens", - }, - { - configURL: "http://localhost/org/repo", - path: "/app/installations/123/access_tokens", - expected: "http://localhost/api/v3/app/installations/123/access_tokens", - }, - } - - for _, test := range tests { - actual, err := githubAPIURL(test.configURL, test.path) - require.NoError(t, err) - assert.Equal(t, test.expected, actual) - } -} From a72fe4d37f313610cf279784ef205da57bb76e54 Mon Sep 17 00:00:00 2001 From: Kirill Bilchenko Date: Tue, 31 Jan 2023 15:57:42 +0100 Subject: [PATCH 051/561] Fix typos and markdown structure in troubleshooting guide (#2148) --- TROUBLESHOOTING.md | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md index 4aaa6d87c3..e87ccf461d 100644 --- a/TROUBLESHOOTING.md +++ b/TROUBLESHOOTING.md @@ -17,8 +17,8 @@ A list of tools which are helpful for troubleshooting -* https://github.com/rewanthtammana/kubectl-fields Kubernetes resources hierarchy parsing tool -* https://github.com/stern/stern Multi pod and container log tailing for Kubernetes +* [Kubernetes resources hierarchy parsing tool `kubectl-fields`](https://github.com/rewanthtammana/kubectl-fields) +* [Multi pod and container log tailing for Kubernetes 
`stern`](https://github.com/stern/stern) ## Installation @@ -30,7 +30,7 @@ Troubeshooting runbooks that relate to ARC installation problems This issue can come up for various reasons like leftovers from previous installations or not being able to access the K8s service's clusterIP associated with the admission webhook server (of ARC). -``` +```text Internal error occurred: failed calling webhook "mutate.runnerdeployment.actions.summerwind.dev": Post "https://actions-runner-controller-webhook.actions-runner-system.svc:443/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment?timeout=10s": context deadline exceeded ``` @@ -39,22 +39,24 @@ Post "https://actions-runner-controller-webhook.actions-runner-system.svc:443/mu First we will try the common solution of checking webhook leftovers from previous installations: -1. ```bash - kubectl get validatingwebhookconfiguration -A - kubectl get mutatingwebhookconfiguration -A - ``` -2. If you see any webhooks related to actions-runner-controller, delete them: +1. ```bash + kubectl get validatingwebhookconfiguration -A + kubectl get mutatingwebhookconfiguration -A + ``` + +2. If you see any webhooks related to actions-runner-controller, delete them: + ```bash kubectl delete mutatingwebhookconfiguration actions-runner-controller-mutating-webhook-configuration kubectl delete validatingwebhookconfiguration actions-runner-controller-validating-webhook-configuration ``` If that didn't work then probably your K8s control-plane is somehow unable to access the K8s service's clusterIP associated with the admission webhook server: + 1. You're running apiserver as a binary and you didn't make service cluster IPs available to the host network. 2. You're running the apiserver in the pod but your pod network (i.e. CNI plugin installation and config) is not good so your pods(like kube-apiserver) in the K8s control-plane nodes can't access ARC's admission webhook server pod(s) in probably data-plane nodes. 
- -Another reason could be due to GKEs firewall settings you may run into the following errors when trying to deploy runners on a private GKE cluster: +Another reason could be due to GKEs firewall settings you may run into the following errors when trying to deploy runners on a private GKE cluster: To fix this, you may either: @@ -93,7 +95,7 @@ To fix this, you may either: **Problem** ```json -2020-11-12T22:17:30.693Z ERROR controller-runtime.controller Reconciler error +2020-11-12T22:17:30.693Z ERROR controller-runtime.controller Reconciler error { "controller": "runner", "request": "actions-runner-system/runner-deployment-dk7q8-dk5c9", @@ -104,6 +106,7 @@ To fix this, you may either: **Solution** Your base64'ed PAT token has a new line at the end, it needs to be created without a `\n` added, either: + * `echo -n $TOKEN | base64` * Create the secret as described in the docs using the shell and documented flags @@ -111,7 +114,7 @@ Your base64'ed PAT token has a new line at the end, it needs to be created witho **Problem** -``` +```text Error: UPGRADE FAILED: failed to create resource: Internal error occurred: failed calling webhook "webhook.cert-manager.io": failed to call webhook: Post "https://cert-manager-webhook.cert-manager.svc:443/mutate?timeout=10s": x509: certificate signed by unknown authority ``` @@ -119,7 +122,7 @@ Apparently, it's failing while `helm` is creating one of resources defined in th You'd try to tail logs from the `cert-manager-cainjector` and see it's failing with an error like: -``` +```text $ kubectl -n cert-manager logs cert-manager-cainjector-7cdbb9c945-g6bt4 I0703 03:31:55.159339 1 start.go:91] "starting" version="v1.1.1" revision="3ac7418070e22c87fae4b22603a6b952f797ae96" I0703 03:31:55.615061 1 leaderelection.go:243] attempting to acquire leader lease kube-system/cert-manager-cainjector-leader-election... 
@@ -137,7 +140,7 @@ Your cluster is based on a new enough Kubernetes of version 1.22 or greater whic In many cases, it's not an option to downgrade Kubernetes. So, just upgrade `cert-manager` to a more recent version that does have have the support for the specific Kubernetes version you're using. -See https://cert-manager.io/docs/installation/supported-releases/ for the list of available cert-manager versions. +See for the list of available cert-manager versions. ## Operations @@ -153,7 +156,7 @@ Sometimes either the runner kind (`kubectl get runners`) or it's underlying pod Remove the finaliser from the relevent runner kind or pod -``` +```text # Get all kind runners and remove the finalizer $ kubectl get runners --no-headers | awk {'print $1'} | xargs kubectl patch runner --type merge -p '{"metadata":{"finalizers":null}}' @@ -195,7 +198,7 @@ spec: If you're running your action runners on a service mesh like Istio, you might have problems with runner configuration accompanied by logs like: -``` +```text .... runner Starting Runner listener with startup type: service runner Started listener process @@ -210,7 +213,7 @@ configuration script tries to communicate with the network. More broadly, there are many other circumstances where the runner pod coming up first can cause issues. -**Solution**
+**Solution** > Added originally to help users with older istio instances. > Newer Istio instances can use Istio's `holdApplicationUntilProxyStarts` attribute ([istio/istio#11130](https://github.com/istio/istio/issues/11130)) to avoid having to delay starting up the runner. @@ -232,7 +235,7 @@ spec: value: "5" ``` -## Outgoing network action hangs indefinitely +### Outgoing network action hangs indefinitely **Problem** @@ -278,9 +281,9 @@ spec: ``` You can read the discussion regarding this issue in -(#1406)[https://github.com/actions/actions-runner-controller/issues/1046]. +[#1406](https://github.com/actions/actions-runner-controller/issues/1046). -## Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns +### Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns **Problem** @@ -292,7 +295,7 @@ You very likely have some dangling workflow jobs stuck in `queued` or `in_progre Manually call [the "list workflow runs" API](https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs-for-a-repository), and [remove the dangling workflow job(s)](https://docs.github.com/en/rest/actions/workflow-runs#delete-a-workflow-run). -## Slow / failure to boot dind sidecar (default runner) +### Slow / failure to boot dind sidecar (default runner) **Problem** From d19265eb3a0eaeeacb9d9c523a79e7e38be2a4c9 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 31 Jan 2023 15:03:11 -0500 Subject: [PATCH 052/561] Delete RunnerScaleSet on service when AutoScalingRunnerSet is deleted. 
(#2223) --- .../autoscalingrunnerset_controller.go | 55 ++++++++++++++---- .../autoscalingrunnerset_controller_test.go | 56 +++++++++++++++++++ github/actions/client.go | 1 + .../actions/client_runner_scale_set_test.go | 36 ++++++++++++ github/actions/fake/client.go | 16 +++++- github/actions/mock_ActionsService.go | 14 +++++ 6 files changed, 167 insertions(+), 11 deletions(-) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index bae05cdf1b..391ca9667a 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -112,6 +112,12 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, nil } + err = r.deleteRunnerScaleSet(ctx, autoscalingRunnerSet, log) + if err != nil { + log.Error(err, "Failed to delete runner scale set") + return ctrl.Result{}, err + } + log.Info("Removing finalizer") err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName) @@ -154,7 +160,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl // Make sure the runner group of the scale set is up to date currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetRunnerGroupNameKey] - if !ok || !strings.EqualFold(currentRunnerGroupName, autoscalingRunnerSet.Spec.RunnerGroup) { + if !ok || (len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 && !strings.EqualFold(currentRunnerGroupName, autoscalingRunnerSet.Spec.RunnerGroup)) { log.Info("AutoScalingRunnerSet runner group changed. 
Updating the runner scale set.") return r.updateRunnerScaleSetRunnerGroup(ctx, autoscalingRunnerSet, log) } @@ -185,7 +191,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl } if desiredSpecHash != latestRunnerSet.Labels[LabelKeyRunnerSpecHash] { - log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set ") + log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set") return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log) } @@ -342,7 +348,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex } } - logger.Info("Created/Reused a runner scale set", "id", runnerScaleSet.Id) + logger.Info("Created/Reused a runner scale set", "id", runnerScaleSet.Id, "runnerGroupName", runnerScaleSet.RunnerGroupName) if autoscalingRunnerSet.Annotations == nil { autoscalingRunnerSet.Annotations = map[string]string{} } @@ -356,7 +362,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex return ctrl.Result{}, err } - logger.Info("Updated with runner scale set ID as an annotation") + logger.Info("Updated with runner scale set ID and runner group name as an annotation") return ctrl.Result{}, nil } @@ -373,13 +379,18 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con return ctrl.Result{}, err } - runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup) - if err != nil { - logger.Error(err, "Failed to get runner group by name", "runnerGroup", autoscalingRunnerSet.Spec.RunnerGroup) - return ctrl.Result{}, err + runnerGroupId := 1 + if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 { + runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup) + if err != nil { + logger.Error(err, "Failed to get runner group by name", "runnerGroup", 
autoscalingRunnerSet.Spec.RunnerGroup) + return ctrl.Result{}, err + } + + runnerGroupId = int(runnerGroup.ID) } - updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetId, &actions.RunnerScaleSet{Name: autoscalingRunnerSet.Name, RunnerGroupId: int(runnerGroup.ID)}) + updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetId, &actions.RunnerScaleSet{Name: autoscalingRunnerSet.Name, RunnerGroupId: runnerGroupId}) if err != nil { logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetId) return ctrl.Result{}, err @@ -397,6 +408,30 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con return ctrl.Result{}, nil } +func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error { + logger.Info("Deleting the runner scale set from Actions service") + runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) + if err != nil { + logger.Error(err, "Failed to parse runner scale set ID") + return err + } + + actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) + if err != nil { + logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set") + return err + } + + err = actionsClient.DeleteRunnerScaleSet(ctx, runnerScaleSetId) + if err != nil { + logger.Error(err, "Failed to delete runner scale set", "runnerScaleSetId", runnerScaleSetId) + return err + } + + logger.Info("Deleted the runner scale set from Actions service") + return nil +} + func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) { desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet) if err != nil { @@ -409,7 +444,7 @@ func (r 
*AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Co return ctrl.Result{}, err } - log.Info("Creating a new EphemeralRunnerSet resource", "name", desiredRunnerSet.Name) + log.Info("Creating a new EphemeralRunnerSet resource") if err := r.Create(ctx, desiredRunnerSet); err != nil { log.Error(err, "Failed to create EphemeralRunnerSet resource") return ctrl.Result{}, err diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index d281d8ccc7..911b8c4d00 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -368,5 +368,61 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(string(listener.UID)), "New Listener should be created") }) + + It("It should update RunnerScaleSet's runner group on service when it changes", func() { + updated := new(actionsv1alpha1.AutoscalingRunnerSet) + // Wait till the listener is created + Eventually( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") + + patched := autoscalingRunnerSet.DeepCopy() + patched.Spec.RunnerGroup = "testgroup2" + err := k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) + Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") + + // Check if AutoScalingRunnerSet has the new runner group in its annotation + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated) + 
if err != nil { + return "", err + } + + if _, ok := updated.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok { + return "", nil + } + + return updated.Annotations[runnerScaleSetRunnerGroupNameKey], nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the new runner group in its annotation") + + // delete the annotation and it should be re-added + patched = autoscalingRunnerSet.DeepCopy() + delete(patched.Annotations, runnerScaleSetRunnerGroupNameKey) + err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) + Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") + + // Check if AutoScalingRunnerSet still has the runner group in its annotation + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated) + if err != nil { + return "", err + } + + if _, ok := updated.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok { + return "", nil + } + + return updated.Annotations[runnerScaleSetRunnerGroupNameKey], nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the runner group in its annotation") + }) }) }) diff --git a/github/actions/client.go b/github/actions/client.go index 70b02b1489..c447f7cf58 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -35,6 +35,7 @@ type ActionsService interface { GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*RunnerGroup, error) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) + DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int) error CreateMessageSession(ctx context.Context, 
runnerScaleSetId int, owner string) (*RunnerScaleSetSession, error) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error diff --git a/github/actions/client_runner_scale_set_test.go b/github/actions/client_runner_scale_set_test.go index 5354a0e584..a1249417d3 100644 --- a/github/actions/client_runner_scale_set_test.go +++ b/github/actions/client_runner_scale_set_test.go @@ -379,3 +379,39 @@ func TestUpdateRunnerScaleSet(t *testing.T) { require.NoError(t, err) }) } + +func TestDeleteRunnerScaleSet(t *testing.T) { + ctx := context.Background() + auth := &actions.ActionsAuth{ + Token: "token", + } + + t.Run("Delete runner scale set", func(t *testing.T) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + assert.Contains(t, r.URL.String(), "/_apis/runtime/runnerscalesets/10?api-version=6.0-preview") + w.WriteHeader(http.StatusNoContent) + })) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + err = client.DeleteRunnerScaleSet(ctx, 10) + assert.NoError(t, err) + }) + + t.Run("Delete calls with error", func(t *testing.T) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + assert.Contains(t, r.URL.String(), "/_apis/runtime/runnerscalesets/10?api-version=6.0-preview") + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"message": "test error"}`)) + })) + + client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + err = client.DeleteRunnerScaleSet(ctx, 10) + assert.ErrorContains(t, err, "test error") + }) +} diff --git a/github/actions/fake/client.go b/github/actions/fake/client.go index 1983180e5a..29321b84c7 100644 --- a/github/actions/fake/client.go +++ b/github/actions/fake/client.go @@ -17,6 +17,13 @@ func WithGetRunnerScaleSetResult(scaleSet 
*actions.RunnerScaleSet, err error) Op } } +func WithGetRunnerGroup(runnerGroup *actions.RunnerGroup, err error) Option { + return func(f *FakeClient) { + f.getRunnerGroupByNameResult.RunnerGroup = runnerGroup + f.getRunnerGroupByNameResult.err = err + } +} + func WithGetRunner(runner *actions.RunnerReference, err error) Option { return func(f *FakeClient) { f.getRunnerResult.RunnerReference = runner @@ -40,7 +47,7 @@ var defaultUpdatedRunnerScaleSet = &actions.RunnerScaleSet{ Id: 1, Name: "testset", RunnerGroupId: 2, - RunnerGroupName: "testgroup", + RunnerGroupName: "testgroup2", Labels: []actions.Label{{Type: "test", Name: "test"}}, RunnerSetting: actions.RunnerSetting{}, CreatedOn: time.Now(), @@ -123,6 +130,9 @@ type FakeClient struct { *actions.RunnerScaleSet err error } + deleteRunnerScaleSetResult struct { + err error + } createMessageSessionResult struct { *actions.RunnerScaleSetSession err error @@ -211,6 +221,10 @@ func (f *FakeClient) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId return f.updateRunnerScaleSetResult.RunnerScaleSet, f.updateRunnerScaleSetResult.err } +func (f *FakeClient) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int) error { + return f.deleteRunnerScaleSetResult.err +} + func (f *FakeClient) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) { return f.createMessageSessionResult.RunnerScaleSetSession, f.createMessageSessionResult.err } diff --git a/github/actions/mock_ActionsService.go b/github/actions/mock_ActionsService.go index d227e8be3e..ba10de8daa 100644 --- a/github/actions/mock_ActionsService.go +++ b/github/actions/mock_ActionsService.go @@ -111,6 +111,20 @@ func (_m *MockActionsService) DeleteMessageSession(ctx context.Context, runnerSc return r0 } +// DeleteRunnerScaleSet provides a mock function with given fields: ctx, runnerScaleSetId +func (_m *MockActionsService) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int) 
error { + ret := _m.Called(ctx, runnerScaleSetId) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int) error); ok { + r0 = rf(ctx, runnerScaleSetId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // GenerateJitRunnerConfig provides a mock function with given fields: ctx, jitRunnerSetting, scaleSetId func (_m *MockActionsService) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *RunnerScaleSetJitRunnerSetting, scaleSetId int) (*RunnerScaleSetJitRunnerConfig, error) { ret := _m.Called(ctx, jitRunnerSetting, scaleSetId) From 69b9ecb39ece66272e18517759e7dde0244f98e6 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 31 Jan 2023 16:00:26 -0500 Subject: [PATCH 053/561] Resolve CI break due to bad merge. (#2236) --- github/actions/client_runner_scale_set_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/github/actions/client_runner_scale_set_test.go b/github/actions/client_runner_scale_set_test.go index a1249417d3..d313d013cc 100644 --- a/github/actions/client_runner_scale_set_test.go +++ b/github/actions/client_runner_scale_set_test.go @@ -393,7 +393,7 @@ func TestDeleteRunnerScaleSet(t *testing.T) { w.WriteHeader(http.StatusNoContent) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) err = client.DeleteRunnerScaleSet(ctx, 10) @@ -408,7 +408,7 @@ func TestDeleteRunnerScaleSet(t *testing.T) { w.Write([]byte(`{"message": "test error"}`)) })) - client, err := actions.NewClient(ctx, server.configURLForOrg("my-org"), auth) + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) err = client.DeleteRunnerScaleSet(ctx, 10) From 16cb245c7a8f4467a0a3a6e30d49e1b9c58b969e Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 31 Jan 2023 17:04:03 -0500 Subject: [PATCH 054/561] Allow provide pre-defined kubernetes secret when helm-install 
AutoScalingRunnerSet (#2234) --- .../templates/_helpers.tpl | 8 ++ .../templates/githubsecret.yaml | 2 + .../tests/template_test.go | 78 +++++++++++++++++++ charts/auto-scaling-runner-set/values.yaml | 8 ++ 4 files changed, 96 insertions(+) diff --git a/charts/auto-scaling-runner-set/templates/_helpers.tpl b/charts/auto-scaling-runner-set/templates/_helpers.tpl index 13889c0a1c..d4ca939fad 100644 --- a/charts/auto-scaling-runner-set/templates/_helpers.tpl +++ b/charts/auto-scaling-runner-set/templates/_helpers.tpl @@ -51,7 +51,15 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{- define "auto-scaling-runner-set.githubsecret" -}} + {{- if kindIs "string" .Values.githubConfigSecret }} + {{- if not (empty .Values.githubConfigSecret) }} +{{- .Values.githubConfigSecret }} + {{- else}} +{{- fail "Values.githubConfigSecret is required for setting auth with GitHub server." }} + {{- end }} + {{- else }} {{- include "auto-scaling-runner-set.fullname" . }}-github-secret + {{- end }} {{- end }} {{- define "auto-scaling-runner-set.noPermissionServiceAccountName" -}} diff --git a/charts/auto-scaling-runner-set/templates/githubsecret.yaml b/charts/auto-scaling-runner-set/templates/githubsecret.yaml index 73e84a7a8d..4374f8335d 100644 --- a/charts/auto-scaling-runner-set/templates/githubsecret.yaml +++ b/charts/auto-scaling-runner-set/templates/githubsecret.yaml @@ -1,3 +1,4 @@ +{{- if not (kindIs "string" .Values.githubConfigSecret) }} apiVersion: v1 kind: Secret metadata: @@ -35,3 +36,4 @@ data: {{- if and $hasAppId (or (not $hasInstallationId) (not $hasPrivateKey)) }} {{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key." 
}} {{- end }} +{{- end}} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/tests/template_test.go b/charts/auto-scaling-runner-set/tests/template_test.go index 099b24b601..954b8d29f9 100644 --- a/charts/auto-scaling-runner-set/tests/template_test.go +++ b/charts/auto-scaling-runner-set/tests/template_test.go @@ -124,6 +124,28 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) { assert.ErrorContains(t, err, "provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key") } +func TestTemplateNotRenderedGitHubSecretWithPredefinedSecret(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secret", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"}) + assert.ErrorContains(t, err, "could not find template templates/githubsecret.yaml in chart", "secret should not be rendered since a pre-defined secret is provided") +} + func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) { t.Parallel() @@ -631,3 +653,59 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[0].Name) assert.NotNil(t, ars.Spec.Template.Spec.Volumes[0].Ephemeral, "Template.Spec should have 1 ephemeral volume") } + +func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := 
filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secrets", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, namespaceName, ars.Namespace) + assert.Equal(t, "test-runners", ars.Name) + + assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) + assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) + assert.Equal(t, "pre-defined-secrets", ars.Spec.GitHubConfigSecret) +} + +func TestTemplateRenderedAutoScalingRunnerSet_ErrorOnEmptyPredefinedSecret(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + require.Error(t, err) + + assert.ErrorContains(t, err, "Values.githubConfigSecret is required for setting auth with GitHub server") +} diff --git a/charts/auto-scaling-runner-set/values.yaml b/charts/auto-scaling-runner-set/values.yaml index 2ee11db6c2..6494ecda53 100644 --- 
a/charts/auto-scaling-runner-set/values.yaml +++ b/charts/auto-scaling-runner-set/values.yaml @@ -13,6 +13,14 @@ githubConfigSecret: ### GitHub PAT Configuration github_token: "" +## If you have a pre-define Kubernetes secret in the same namespace the auto-scaling-runner-set is going to deploy, +## you can also reference it via `githubConfigSecret: pre-defined-secret`. +## You need to make sure your predefined secret has all the required secret data set properly. +## For a pre-defined secret using GitHub PAT, the secret needs to be created like this: +## > kubectl create secret generic pre-defined-secret --namespace=my_namespace --from-literal=github_token='ghp_your_pat' +## For a pre-defined secret using GitHub App, the secret needs to be created like this: +## > kubectl create secret generic pre-defined-secret --namespace=my_namespace --from-literal=github_app_id=123456 --from-literal=github_app_installation_id=654321 --from-literal=github_app_private_key='-----BEGIN CERTIFICATE-----*******' +# githubConfigSecret: pre-defined-secret ## maxRunners is the max number of runners the auto scaling runner set will scale up to. 
# maxRunners: 5 From 150707a3582ecfb1e36aa9254a0cf30b29fe7e14 Mon Sep 17 00:00:00 2001 From: dhawalseth Date: Wed, 1 Feb 2023 00:04:18 -0800 Subject: [PATCH 055/561] Add documentation to update ARC with prometheus CRDs needed by actions metrics server (#2209) Co-authored-by: Yusuke Kuoka --- charts/actions-runner-controller/docs/UPGRADING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/charts/actions-runner-controller/docs/UPGRADING.md b/charts/actions-runner-controller/docs/UPGRADING.md index 79a9213430..4625715ab1 100644 --- a/charts/actions-runner-controller/docs/UPGRADING.md +++ b/charts/actions-runner-controller/docs/UPGRADING.md @@ -29,6 +29,8 @@ curl -L https://github.com/actions/actions-runner-controller/releases/download/a kubectl replace -f crds/ ``` +Note that in case you're going to create prometheus-operator `ServiceMonitor` resources via the chart, you'd need to deploy prometheus-operator-related CRDs as well. + 2. Upgrade the Helm release ```shell From a8faac531782f00360b30cb842075ce4a9b5669a Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Wed, 1 Feb 2023 13:47:54 +0000 Subject: [PATCH 056/561] Add Identifier to actions.Client (#2237) --- github/actions/client.go | 23 +++++ github/actions/config_test.go | 23 +++-- github/actions/identifier_test.go | 111 ++++++++++++++++++++ github/actions/multi_client.go | 67 ++++--------- github/actions/multi_client_test.go | 150 +++++++--------------------- 5 files changed, 200 insertions(+), 174 deletions(-) create mode 100644 github/actions/identifier_test.go diff --git a/github/actions/client.go b/github/actions/client.go index c447f7cf58..63c9ed169a 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -165,6 +165,29 @@ func NewClient(githubConfigURL string, creds *ActionsAuth, options ...ClientOpti return ac, nil } +// Identifier returns a string to help identify a client uniquely. 
+// This is used for caching client instances and understanding when a config +// change warrants creating a new client. Any changes to Client that would +// require a new client should be reflected here. +func (c *Client) Identifier() string { + identifier := fmt.Sprintf("configURL:%q,", c.config.ConfigURL.String()) + + if c.creds.Token != "" { + identifier += fmt.Sprintf("token:%q", c.creds.Token) + } + + if c.creds.AppCreds != nil { + identifier += fmt.Sprintf( + "appID:%q,installationID:%q,key:%q", + c.creds.AppCreds.AppID, + c.creds.AppCreds.AppInstallationID, + c.creds.AppCreds.AppPrivateKey, + ) + } + + return uuid.NewMD5(uuid.NameSpaceOID, []byte(identifier)).String() +} + func (c *Client) Do(req *http.Request) (*http.Response, error) { resp, err := c.Client.Do(req) if err != nil { diff --git a/github/actions/config_test.go b/github/actions/config_test.go index a9a8368fcb..e64928e262 100644 --- a/github/actions/config_test.go +++ b/github/actions/config_test.go @@ -91,18 +91,19 @@ func TestGitHubConfig(t *testing.T) { } }) - t.Run("when given an invalid URL", func(t *testing.T) {}) - invalidURLs := []string{ - "https://github.com/", - "https://github.com", - "https://github.com/some/random/path", - } + t.Run("when given an invalid URL", func(t *testing.T) { + invalidURLs := []string{ + "https://github.com/", + "https://github.com", + "https://github.com/some/random/path", + } - for _, u := range invalidURLs { - _, err := actions.ParseGitHubConfigFromURL(u) - require.Error(t, err) - assert.True(t, errors.Is(err, actions.ErrInvalidGitHubConfigURL)) - } + for _, u := range invalidURLs { + _, err := actions.ParseGitHubConfigFromURL(u) + require.Error(t, err) + assert.True(t, errors.Is(err, actions.ErrInvalidGitHubConfigURL)) + } + }) } func TestGitHubConfig_GitHubAPIURL(t *testing.T) { diff --git a/github/actions/identifier_test.go b/github/actions/identifier_test.go new file mode 100644 index 0000000000..0a184f86cd --- /dev/null +++ 
b/github/actions/identifier_test.go @@ -0,0 +1,111 @@ +package actions_test + +import ( + "testing" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestClient_Identifier(t *testing.T) { + t.Run("configURL changes", func(t *testing.T) { + scenarios := []struct { + name string + url string + }{ + { + name: "url of a different repo", + url: "https://github.com/org/repo2", + }, + { + name: "url of an org", + url: "https://github.com/org", + }, + { + name: "url of an enterprise", + url: "https://github.com/enterprises/my-enterprise", + }, + { + name: "url of a self-hosted github", + url: "https://selfhosted.com/org/repo", + }, + } + + configURL := "https://github.com/org/repo" + defaultCreds := &actions.ActionsAuth{ + Token: "token", + } + oldClient, err := actions.NewClient(configURL, defaultCreds) + require.NoError(t, err) + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + newClient, err := actions.NewClient(scenario.url, defaultCreds) + require.NoError(t, err) + assert.NotEqual(t, oldClient.Identifier(), newClient.Identifier()) + }) + } + }) + + t.Run("credentials change", func(t *testing.T) { + defaultTokenCreds := &actions.ActionsAuth{ + Token: "token", + } + defaultAppCreds := &actions.ActionsAuth{ + AppCreds: &actions.GitHubAppAuth{ + AppID: 123, + AppInstallationID: 123, + AppPrivateKey: "private key", + }, + } + + scenarios := []struct { + name string + old *actions.ActionsAuth + new *actions.ActionsAuth + }{ + { + name: "different token", + old: defaultTokenCreds, + new: &actions.ActionsAuth{ + Token: "new token", + }, + }, + { + name: "changing from token to github app", + old: defaultTokenCreds, + new: defaultAppCreds, + }, + { + name: "changing from github app to token", + old: defaultAppCreds, + new: defaultTokenCreds, + }, + { + name: "different github app", + old: defaultAppCreds, + new: &actions.ActionsAuth{ + 
AppCreds: &actions.GitHubAppAuth{ + AppID: 456, + AppInstallationID: 456, + AppPrivateKey: "new private key", + }, + }, + }, + } + + defaultConfigURL := "https://github.com/org/repo" + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + oldClient, err := actions.NewClient(defaultConfigURL, scenario.old) + require.NoError(t, err) + + newClient, err := actions.NewClient(defaultConfigURL, scenario.new) + require.NoError(t, err) + assert.NotEqual(t, oldClient.Identifier(), newClient.Identifier()) + }) + } + }) +} diff --git a/github/actions/multi_client.go b/github/actions/multi_client.go index b875c8723d..1e2041398d 100644 --- a/github/actions/multi_client.go +++ b/github/actions/multi_client.go @@ -4,7 +4,6 @@ import ( "context" "crypto/x509" "fmt" - "net/url" "strconv" "sync" @@ -19,7 +18,7 @@ type MultiClient interface { type multiClient struct { // To lock adding and removing of individual clients. mu sync.Mutex - clients map[ActionsClientKey]*actionsClientWrapper + clients map[ActionsClientKey]*Client logger logr.Logger userAgent string @@ -40,22 +39,14 @@ type ActionsAuth struct { } type ActionsClientKey struct { - ActionsURL string - Auth ActionsAuth + Identifier string Namespace string } -type actionsClientWrapper struct { - // To lock client usage when tokens are being refreshed. 
- mu sync.Mutex - - client ActionsService -} - func NewMultiClient(userAgent string, logger logr.Logger) MultiClient { return &multiClient{ mu: sync.Mutex{}, - clients: make(map[ActionsClientKey]*actionsClientWrapper), + clients: make(map[ActionsClientKey]*Client), logger: logger, userAgent: userAgent, } @@ -64,11 +55,6 @@ func NewMultiClient(userAgent string, logger logr.Logger) MultiClient { func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, creds ActionsAuth, namespace string) (ActionsService, error) { m.logger.Info("retrieve actions client", "githubConfigURL", githubConfigURL, "namespace", namespace) - parsedGitHubURL, err := url.Parse(githubConfigURL) - if err != nil { - return nil, err - } - if creds.Token == "" && creds.AppCreds == nil { return nil, fmt.Errorf("no credentials provided. either a PAT or GitHub App credentials should be provided") } @@ -77,34 +63,6 @@ func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, return nil, fmt.Errorf("both PAT and GitHub App credentials provided. 
should only provide one") } - key := ActionsClientKey{ - ActionsURL: parsedGitHubURL.String(), - Namespace: namespace, - } - - if creds.AppCreds != nil { - key.Auth = ActionsAuth{ - AppCreds: creds.AppCreds, - } - } - - if creds.Token != "" { - key.Auth = ActionsAuth{ - Token: creds.Token, - } - } - - m.mu.Lock() - defer m.mu.Unlock() - - clientWrapper, has := m.clients[key] - if has { - m.logger.Info("using cache client", "githubConfigURL", githubConfigURL, "namespace", namespace) - return clientWrapper.client, nil - } - - m.logger.Info("creating new client", "githubConfigURL", githubConfigURL, "namespace", namespace) - client, err := NewClient( githubConfigURL, &creds, @@ -115,11 +73,24 @@ func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, return nil, err } - m.clients[key] = &actionsClientWrapper{ - mu: sync.Mutex{}, - client: client, + m.mu.Lock() + defer m.mu.Unlock() + + key := ActionsClientKey{ + Identifier: client.Identifier(), + Namespace: namespace, } + cachedClient, has := m.clients[key] + if has { + m.logger.Info("using cache client", "githubConfigURL", githubConfigURL, "namespace", namespace) + return cachedClient, nil + } + + m.logger.Info("creating new client", "githubConfigURL", githubConfigURL, "namespace", namespace) + + m.clients[key] = client + m.logger.Info("successfully created new client", "githubConfigURL", githubConfigURL, "namespace", namespace) return client, nil diff --git a/github/actions/multi_client_test.go b/github/actions/multi_client_test.go index fb4a64dbe7..a61686c658 100644 --- a/github/actions/multi_client_test.go +++ b/github/actions/multi_client_test.go @@ -2,131 +2,51 @@ package actions import ( "context" - "encoding/json" "fmt" - "net/http" - "net/http/httptest" - "strings" "testing" - "time" "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestAddClient(t *testing.T) { +func TestMultiClientCaching(t *testing.T) { logger := 
logr.Discard() - multiClient := NewMultiClient("test-user-agent", logger).(*multiClient) - ctx := context.Background() + multiClient := NewMultiClient("test-user-agent", logger).(*multiClient) - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if strings.HasSuffix(r.URL.Path, "actions/runners/registration-token") { - w.WriteHeader(http.StatusCreated) - w.Header().Set("Content-Type", "application/json") - - token := "abc-123" - rt := ®istrationToken{Token: &token} - - if err := json.NewEncoder(w).Encode(rt); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - } - if strings.HasSuffix(r.URL.Path, "actions/runner-registration") { - w.Header().Set("Content-Type", "application/json") - - url := "actions.github.com/abc" - jwt := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiaWF0IjoxNTE2MjM5MDIyLCJleHAiOjI1MTYyMzkwMjJ9.tlrHslTmDkoqnc4Kk9ISoKoUNDfHo-kjlH-ByISBqzE" - adminConnInfo := &ActionsServiceAdminConnection{ActionsServiceUrl: &url, AdminToken: &jwt} - - if err := json.NewEncoder(w).Encode(adminConnInfo); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - } - if strings.HasSuffix(r.URL.Path, "/access_tokens") { - w.Header().Set("Content-Type", "application/vnd.github+json") - - t, _ := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z07:00") - accessToken := &accessToken{ - Token: "abc-123", - ExpiresAt: t, - } - - if err := json.NewEncoder(w).Encode(accessToken); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - } - })) - defer srv.Close() - - want := 1 - if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{Token: "PAT"}, "namespace"); err != nil { - t.Fatal(err) - } - - want++ // New repo - if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/actions", srv.URL), ActionsAuth{Token: "PAT"}, "namespace"); err != nil { - t.Fatal(err) - } - - 
// Repeat - if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{Token: "PAT"}, "namespace"); err != nil { - t.Fatal(err) - } - - want++ // New namespace - if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{Token: "PAT"}, "other"); err != nil { - t.Fatal(err) - } - - want++ // New pat - if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{Token: "other"}, "other"); err != nil { - t.Fatal(err) - } - - want++ // New org - if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github", srv.URL), ActionsAuth{Token: "PAT"}, "other"); err != nil { - t.Fatal(err) - } - - // No org, repo, enterprise - if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v", srv.URL), ActionsAuth{Token: "PAT"}, "other"); err == nil { - t.Fatal(err) - } - - want++ // Test keying on GitHub App - appAuth := &GitHubAppAuth{ - AppID: 1, - AppPrivateKey: `-----BEGIN RSA PRIVATE KEY----- -MIICWgIBAAKBgHXfRT9cv9UY9fAAD4+1RshpfSSZe277urfEmPfX3/Og9zJYRk// -CZrJVD1CaBZDiIyQsNEzjta7r4UsqWdFOggiNN2E7ZTFQjMSaFkVgrzHqWuiaCBf -/BjbKPn4SMDmTzHvIe7Nel76hBdCaVgu6mYCW5jmuSH5qz/yR1U1J/WJAgMBAAEC -gYARWGWsSU3BYgbu5lNj5l0gKMXNmPhdAJYdbMTF0/KUu18k/XB7XSBgsre+vALt -I8r4RGKApoGif8P4aPYUyE8dqA1bh0X3Fj1TCz28qoUL5//dA+pigCRS20H7HM3C -ojoqF7+F+4F2sXmzFNd1NgY5RxFPYosTT7OnUiFuu2IisQJBALnMLe09LBnjuHXR -xxR65DDNxWPQLBjW3dL+ubLcwr7922l6ZIQsVjdeE0ItEUVRjjJ9/B/Jq9VJ/Lw4 -g9LCkkMCQQCiaM2f7nYmGivPo9hlAbq5lcGJ5CCYFfeeYzTxMqum7Mbqe4kk5lgb -X6gWd0Izg2nGdAEe/97DClO6VpKcPbpDAkBTR/JOJN1fvXMxXJaf13XxakrQMr+R -Yr6LlSInykyAz8lJvlLP7A+5QbHgN9NF/wh+GXqpxPwA3ukqdSqhjhWBAkBn6mDv -HPgR5xrzL6XM8y9TgaOlJAdK6HtYp6d/UOmN0+Butf6JUq07TphRT5tXNJVgemch -O5x/9UKfbrc+KyzbAkAo97TfFC+mZhU1N5fFelaRu4ikPxlp642KRUSkOh8GEkNf -jQ97eJWiWtDcsMUhcZgoB5ydHcFlrBIn6oBcpge5 ------END RSA PRIVATE KEY-----`, - } - if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{AppCreds: appAuth}, 
"other"); err != nil { - t.Fatal(err) - } - - // Repeat last to verify GitHub App keys are mapped together - if _, err := multiClient.GetClientFor(ctx, fmt.Sprintf("%v/github/github", srv.URL), ActionsAuth{AppCreds: appAuth}, "other"); err != nil { - t.Fatal(err) - } - - if len(multiClient.clients) != want { - t.Fatalf("GetClientFor: unexpected number of clients: got=%v want=%v", len(multiClient.clients), want) + defaultNamespace := "default" + defaultConfigURL := "https://github.com/org/repo" + defaultCreds := &ActionsAuth{ + Token: "token", } + client, err := NewClient(defaultConfigURL, defaultCreds) + require.NoError(t, err) + + multiClient.clients[ActionsClientKey{client.Identifier(), defaultNamespace}] = client + + // Verify that the client is cached + cachedClient, err := multiClient.GetClientFor( + ctx, + defaultConfigURL, + *defaultCreds, + defaultNamespace, + ) + require.NoError(t, err) + assert.Equal(t, client, cachedClient) + assert.Len(t, multiClient.clients, 1) + + // Asking for a different client results in creating and caching a new client + otherNamespace := "other" + newClient, err := multiClient.GetClientFor( + ctx, + defaultConfigURL, + *defaultCreds, + otherNamespace, + ) + require.NoError(t, err) + assert.NotEqual(t, client, newClient) + assert.Len(t, multiClient.clients, 2) } func TestCreateJWT(t *testing.T) { From 3e021b081d432df84a9ea1f75a67e4ce06892ebf Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Thu, 2 Feb 2023 08:28:34 +0000 Subject: [PATCH 057/561] Use UUID v5 for client identifiers (#2241) --- github/actions/client.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/github/actions/client.go b/github/actions/client.go index 63c9ed169a..2d3c790682 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -3,6 +3,7 @@ package actions import ( "bytes" "context" + "crypto/sha256" "crypto/tls" "crypto/x509" "encoding/base64" @@ -185,7 +186,7 @@ func (c *Client) Identifier() string { ) } - return 
uuid.NewMD5(uuid.NameSpaceOID, []byte(identifier)).String() + return uuid.NewHash(sha256.New(), uuid.NameSpaceOID, []byte(identifier), 6).String() } func (c *Client) Do(req *http.Request) (*http.Response, error) { From 44bdf8ecadbd037005aaeeadbb2cee6d626769da Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Thu, 2 Feb 2023 18:11:59 +0100 Subject: [PATCH 058/561] ADR: automate runner updates (#2244) --- adrs/2023-02-02-automate-runner-updates.md | 39 +++++++++++++++++++ ...000-TEMPLATE.md => yyyy-mm-dd-TEMPLATE.md} | 0 2 files changed, 39 insertions(+) create mode 100644 adrs/2023-02-02-automate-runner-updates.md rename adrs/{0000-TEMPLATE.md => yyyy-mm-dd-TEMPLATE.md} (100%) diff --git a/adrs/2023-02-02-automate-runner-updates.md b/adrs/2023-02-02-automate-runner-updates.md new file mode 100644 index 0000000000..f88f0a325e --- /dev/null +++ b/adrs/2023-02-02-automate-runner-updates.md @@ -0,0 +1,39 @@ +# Automate updating runner version + +**Status**: Proposed + +## Context + +When a new [runner](https://github.com/actions/runner) version is released, new +images need to be built in +[actions-runner-controller/releases](https://github.com/actions-runner-controller/releases). +This is currently started by the +[release-runners](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/release-runners.yaml) +workflow, although this only starts when the set of file containing the runner +version is updated (and this is currently done manually). + +## Decision + +We can have another workflow running on a cadence (hourly seems sensible) and checking for new runner +releases, creating a PR updating `RUNNER_VERSION` in: +- `.github/workflows/release-runners.yaml` +- `Makefile` +- `runner/Makefile` +- `test/e2e/e2e_test.go` + +Once that PR is merged, the existing workflow will pick things up. + +## Consequences + +We don't have to add an extra step to the runner release process and a direct +dependency on ARC. 
Since images won't be built until the generated PR is merged +we still have room to wait before triggering a build should there be any +problems with the runner release. + +## Considered alternatives + +We also considered firing the workflow to create the PR via +`repository_dispatch` as part of the release process of runner itself, but we +discarded it because that would have required a PAT or a GitHub app with `repo` +scope within the Actions org and would have added a new direct dependency on the +runner side. diff --git a/adrs/0000-TEMPLATE.md b/adrs/yyyy-mm-dd-TEMPLATE.md similarity index 100% rename from adrs/0000-TEMPLATE.md rename to adrs/yyyy-mm-dd-TEMPLATE.md From 5726603863920bc47559689f66d751e1aeea1637 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Fri, 3 Feb 2023 17:27:31 +0100 Subject: [PATCH 059/561] Avoid deleting scale set if annotation is not parsable or if it does not exist (#2239) --- .../autoscalingrunnerset_controller.go | 7 +- .../autoscalingrunnerset_controller_test.go | 190 +++++++++++++++--- github/actions/fake/client.go | 7 + 3 files changed, 179 insertions(+), 25 deletions(-) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 391ca9667a..db0a4f9d7b 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -412,8 +412,11 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex logger.Info("Deleting the runner scale set from Actions service") runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) if err != nil { - logger.Error(err, "Failed to parse runner scale set ID") - return err + // If the annotation is not set correctly, or if it does not exist, we are going to get stuck in a loop trying to parse the scale set id. 
+ // If the configuration is invalid (secret does not exist for example), we never get to the point to create runner set. But then, manual cleanup + // would get stuck finalizing the resource trying to parse annotation indefinitely + logger.Info("autoscaling runner set does not have annotation describing scale set id. Skip deletion", "err", err.Error()) + return nil } actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index 911b8c4d00..65bebe8cbd 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -8,6 +8,7 @@ import ( corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" logf "sigs.k8s.io/controller-runtime/pkg/log" . 
"github.com/onsi/ginkgo/v2" @@ -15,7 +16,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/github/actions/fake" ) @@ -29,7 +30,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { var ctx context.Context var cancel context.CancelFunc autoscalingNS := new(corev1.Namespace) - autoscalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet) + autoscalingRunnerSet := new(v1alpha1.AutoscalingRunnerSet) configSecret := new(corev1.Secret) BeforeEach(func() { @@ -73,12 +74,12 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { min := 1 max := 10 - autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ + Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, MaxRunners: &max, @@ -118,7 +119,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { Context("When creating a new AutoScalingRunnerSet", func() { It("It should create/add all required resources for a new AutoScalingRunnerSet (finalizer, runnerscaleset, ephemeralrunnerset, listener)", func() { // Check if finalizer is added - created := new(actionsv1alpha1.AutoscalingRunnerSet) + created := new(v1alpha1.AutoscalingRunnerSet) Eventually( func() (string, error) { err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created) @@ -157,7 +158,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // Check if ephemeral runner set is created Eventually( 
func() (int, error) { - runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) if err != nil { return 0, err @@ -171,13 +172,13 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // Check if listener is created Eventually( func() error { - return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(v1alpha1.AutoscalingListener)) }, autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") // Check if status is updated - runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") Expect(len(runnerSetList.Items)).To(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created") @@ -189,7 +190,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { Eventually( func() (int, error) { - updated := new(actionsv1alpha1.AutoscalingRunnerSet) + updated := new(v1alpha1.AutoscalingRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated) if err != nil { return 0, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err) @@ -206,7 +207,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // Wait till the listener is created Eventually( func() error { - return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: 
autoscalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(v1alpha1.AutoscalingListener)) }, autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") @@ -218,7 +219,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // Check if the listener is deleted Eventually( func() error { - err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(v1alpha1.AutoscalingListener)) if err != nil && errors.IsNotFound(err) { return nil } @@ -231,7 +232,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // Check if all the EphemeralRunnerSet is deleted Eventually( func() error { - runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) if err != nil { return err @@ -249,7 +250,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // Check if the AutoScalingRunnerSet is deleted Eventually( func() error { - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingRunnerSet)) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, new(v1alpha1.AutoscalingRunnerSet)) if err != nil && errors.IsNotFound(err) { return nil } @@ -264,7 +265,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { Context("When updating a new 
AutoScalingRunnerSet", func() { It("It should re-create EphemeralRunnerSet and Listener as needed when updating AutoScalingRunnerSet", func() { // Wait till the listener is created - listener := new(actionsv1alpha1.AutoscalingListener) + listener := new(v1alpha1.AutoscalingListener) Eventually( func() error { return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener) @@ -272,7 +273,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") - runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet") @@ -289,7 +290,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // We should create a new EphemeralRunnerSet and delete the old one, eventually, we will have only one EphemeralRunnerSet Eventually( func() (string, error) { - runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) if err != nil { return "", err @@ -307,7 +308,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // We should create a new listener Eventually( func() (string, error) { - listener := new(actionsv1alpha1.AutoscalingListener) + listener := new(v1alpha1.AutoscalingListener) err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener) if err != nil { return "", err @@ -320,13 +321,13 @@ var _ = 
Describe("Test AutoScalingRunnerSet controller", func() { // Only update the Spec for the AutoScalingListener // This should trigger re-creation of the Listener only - runnerSetList = new(actionsv1alpha1.EphemeralRunnerSetList) + runnerSetList = new(v1alpha1.EphemeralRunnerSetList) err = k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet") runnerSet = runnerSetList.Items[0] - listener = new(actionsv1alpha1.AutoscalingListener) + listener = new(v1alpha1.AutoscalingListener) err = k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener) Expect(err).NotTo(HaveOccurred(), "failed to get Listener") @@ -339,7 +340,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // We should not re-create a new EphemeralRunnerSet Consistently( func() (string, error) { - runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList) + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) if err != nil { return "", err @@ -357,7 +358,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { // We should only re-create a new listener Eventually( func() (string, error) { - listener := new(actionsv1alpha1.AutoscalingListener) + listener := new(v1alpha1.AutoscalingListener) err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener) if err != nil { return "", err @@ -370,11 +371,11 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { }) It("It should update RunnerScaleSet's runner group on service when it changes", func() { - updated := new(actionsv1alpha1.AutoscalingRunnerSet) + updated := 
new(v1alpha1.AutoscalingRunnerSet) // Wait till the listener is created Eventually( func() error { - return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener)) + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, new(v1alpha1.AutoscalingListener)) }, autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created") @@ -426,3 +427,146 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { }) }) }) + +var _ = Describe("Test AutoscalingController creation failures", func() { + Context("When autoscaling runner set creation fails on the client", func() { + var ctx context.Context + var cancel context.CancelFunc + autoscalingNS := new(corev1.Namespace) + configSecret := new(corev1.Secret) + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.TODO()) + autoscalingNS = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)}, + } + + err := k8sClient.Create(ctx, autoscalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") + + configSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: autoscalingNS.Name, + }, + Data: map[string][]byte{ + "github_token": []byte(autoscalingRunnerSetTestGitHubToken), + }, + } + + err = k8sClient.Create(ctx, configSecret) + Expect(err).NotTo(HaveOccurred(), "failed to create config secret") + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{}) + Expect(err).NotTo(HaveOccurred(), "failed to create manager") + + controller := &AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ControllerNamespace: autoscalingNS.Name, + DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", 
+ ActionsClient: fake.NewMultiClient(), + } + err = controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + go func() { + defer GinkgoRecover() + + err := mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred(), "failed to start manager") + }() + }) + + AfterEach(func() { + defer cancel() + + err := k8sClient.Delete(ctx, autoscalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") + }) + + It("It should be able to clean up if annotation related to scale set id does not exist", func() { + min := 1 + max := 10 + autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + MaxRunners: &max, + MinRunners: &min, + RunnerGroup: "testgroup", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err := k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + // wait for the finalizer to be added + ars := new(v1alpha1.AutoscalingRunnerSet) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, ars) + if err != nil { + return "", err + } + if len(ars.Finalizers) == 0 { + return "", nil + } + return ars.Finalizers[0], nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer") + + ars.ObjectMeta.Annotations = make(map[string]string) + err = k8sClient.Update(ctx, ars) + Expect(err).NotTo(HaveOccurred(), "Update autoscaling runner set without annotation should be successful") + + 
Eventually( + func() (bool, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, ars) + if err != nil { + return false, err + } + return len(ars.ObjectMeta.Annotations) == 0, nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(true), "Autoscaling runner set should be updated with empty annotations") + + err = k8sClient.Delete(ctx, ars) + Expect(err).NotTo(HaveOccurred(), "Delete autoscaling runner set should be successful") + + Eventually( + func() (bool, error) { + updated := new(v1alpha1.AutoscalingRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated) + if err == nil { + return false, nil + } + if !errors.IsNotFound(err) { + return false, err + } + + return !controllerutil.ContainsFinalizer(updated, autoscalingRunnerSetFinalizerName), nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(true), "Finalizer and resource should eventually be deleted") + }) + }) +}) diff --git a/github/actions/fake/client.go b/github/actions/fake/client.go index 29321b84c7..5d7e22b790 100644 --- a/github/actions/fake/client.go +++ b/github/actions/fake/client.go @@ -31,6 +31,13 @@ func WithGetRunner(runner *actions.RunnerReference, err error) Option { } } +func WithCreateRunnerScaleSet(scaleSet *actions.RunnerScaleSet, err error) Option { + return func(f *FakeClient) { + f.createRunnerScaleSetResult.RunnerScaleSet = scaleSet + f.createRunnerScaleSetResult.err = err + } +} + var defaultRunnerScaleSet = &actions.RunnerScaleSet{ Id: 1, Name: "testset", From b783afa562090f684c6aa246988f3c314647921b Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Mon, 6 Feb 2023 11:22:58 +0100 Subject: [PATCH 060/561] Add new workflow to automate runner updates (#2247) Co-authored-by: Nikola Jokic --- 
.github/workflows/release-runners.yaml | 22 ++-- .github/workflows/update-runners.yaml | 107 ++++++++++++++++++ runner/VERSION | 1 + ...nner-dind-rootless.ubuntu-20.04.dockerfile | 2 +- ...nner-dind-rootless.ubuntu-22.04.dockerfile | 2 +- ...ctions-runner-dind.ubuntu-20.04.dockerfile | 2 +- ...ctions-runner-dind.ubuntu-22.04.dockerfile | 2 +- runner/actions-runner.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner.ubuntu-22.04.dockerfile | 2 +- 9 files changed, 129 insertions(+), 13 deletions(-) create mode 100644 .github/workflows/update-runners.yaml create mode 100644 runner/VERSION diff --git a/.github/workflows/release-runners.yaml b/.github/workflows/release-runners.yaml index 726a8bf7a9..5e6f1efefb 100644 --- a/.github/workflows/release-runners.yaml +++ b/.github/workflows/release-runners.yaml @@ -3,23 +3,20 @@ name: Runners # Revert to https://github.com/actions-runner-controller/releases#releases # for details on why we use this approach on: - # We must do a trigger on a push: instead of a types: closed so GitHub Secrets + # We must do a trigger on a push: instead of a types: closed so GitHub Secrets # are available to the workflow run push: branches: - 'master' paths: - - 'runner/**' - - '!runner/Makefile' - - '.github/workflows/runners.yaml' - - '!**.md' + - 'runner/VERSION' + - '.github/workflows/release-runners.yaml' env: - # Safeguard to prevent pushing images to registeries after build + # Safeguard to prevent pushing images to registeries after build PUSH_TO_REGISTRIES: true TARGET_ORG: actions-runner-controller TARGET_WORKFLOW: release-runners.yaml - RUNNER_VERSION: 2.301.1 DOCKER_VERSION: 20.10.21 RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0 @@ -28,6 +25,13 @@ jobs: name: Trigger Build and Push of Runner Images runs-on: ubuntu-latest steps: + - uses: actions/checkout@v3 + - name: Get runner version + id: runner_version + run: | + version=$(echo -n $(cat runner/VERSION)) + echo runner_version=$version >> $GITHUB_OUTPUT + - name: Get Token id: 
get_workflow_token uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db @@ -37,6 +41,8 @@ jobs: organization: ${{ env.TARGET_ORG }} - name: Trigger Build And Push Runner Images To Registries + env: + RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }} run: | # Authenticate gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }} @@ -50,6 +56,8 @@ jobs: -f push_to_registries=${{ env.PUSH_TO_REGISTRIES }} - name: Job summary + env: + RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }} run: | echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/update-runners.yaml b/.github/workflows/update-runners.yaml new file mode 100644 index 0000000000..d97776c160 --- /dev/null +++ b/.github/workflows/update-runners.yaml @@ -0,0 +1,107 @@ +# This workflows polls releases from actions/runner and in case of a new one it +# updates files containing runner version and opens a pull request. +name: Update runners + +on: + schedule: + # run daily + - cron: "0 9 * * *" + workflow_dispatch: + +jobs: + # check_versions compares our current version and the latest available runner + # version and sets them as outputs. 
+ check_versions: + runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} + outputs: + current_version: ${{ steps.versions.outputs.current_version }} + latest_version: ${{ steps.versions.outputs.latest_version }} + steps: + - uses: actions/checkout@v3 + + - name: Get current and latest versions + id: versions + run: | + CURRENT_VERSION=$(echo -n $(cat runner/VERSION)) + echo "Current version: $CURRENT_VERSION" + echo current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT + + LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1) + echo "Latest version: $LATEST_VERSION" + echo latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT + + # check_pr checks if a PR for the same update already exists. It only runs if + # runner latest version != our current version. If no existing PR is found, + # it sets a PR name as output. + check_pr: + runs-on: ubuntu-latest + needs: check_versions + if: needs.check_versions.outputs.current_version != needs.check_versions.outputs.latest_version + outputs: + pr_name: ${{ steps.pr_name.outputs.pr_name }} + env: + GH_TOKEN: ${{ github.token }} + steps: + - name: debug + run: + echo ${{ needs.check_versions.outputs.current_version }} + echo ${{ needs.check_versions.outputs.latest_version }} + - uses: actions/checkout@v3 + + - name: PR Name + id: pr_name + env: + LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }} + run: | + PR_NAME="Update runner to version ${LATEST_VERSION}" + + result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1) + if [ -z "$result" ] + then + echo "No existing PRs found, setting output with pr_name=$PR_NAME" + echo pr_name=$PR_NAME >> $GITHUB_OUTPUT + else + echo "Found a PR with title '$PR_NAME' already existing: ${{ github.server_url }}/${{ github.repository }}/pull/$result" + fi + + # update_version updates runner version in the files listed below, commits + # the changes and opens a pull 
request as `github-actions` bot. + update_version: + runs-on: ubuntu-latest + needs: + - check_versions + - check_pr + if: needs.check_pr.outputs.pr_name + permissions: + pull-requests: write + contents: write + env: + GH_TOKEN: ${{ github.token }} + CURRENT_VERSION: ${{ needs.check_versions.outputs.current_version }} + LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }} + PR_NAME: ${{ needs.check_pr.outputs.pr_name }} + + steps: + - uses: actions/checkout@v3 + - name: New branch + run: git checkout -b update-runner-$LATEST_VERSION + - name: Update files + run: | + sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION + sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile + sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile + sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go + + - name: Commit changes + run: | + # from https://github.com/orgs/community/discussions/26560 + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + git config user.name "github-actions[bot]" + git add . 
+ git commit -m "$PR_NAME" + git push -u origin HEAD + + - name: Create pull request + run: gh pr create -f diff --git a/runner/VERSION b/runner/VERSION new file mode 100644 index 0000000000..b886075fa2 --- /dev/null +++ b/runner/VERSION @@ -0,0 +1 @@ +2.301.1 diff --git a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile index eb36d8a37d..9cb3b34379 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.301.1 +ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ENV CHANNEL=stable diff --git a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile index cc7a1c850c..d91fc7ef07 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.301.1 +ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ENV CHANNEL=stable diff --git a/runner/actions-runner-dind.ubuntu-20.04.dockerfile b/runner/actions-runner-dind.ubuntu-20.04.dockerfile index 0e8e790acd..f0ea6f07b8 100644 --- a/runner/actions-runner-dind.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-20.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.301.1 +ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable diff --git a/runner/actions-runner-dind.ubuntu-22.04.dockerfile b/runner/actions-runner-dind.ubuntu-22.04.dockerfile index e549ca753d..37f9c3e15d 100644 --- a/runner/actions-runner-dind.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-22.04.dockerfile 
@@ -1,7 +1,7 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.301.1 +ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable diff --git a/runner/actions-runner.ubuntu-20.04.dockerfile b/runner/actions-runner.ubuntu-20.04.dockerfile index 3c4ae5a156..6017b0ec19 100644 --- a/runner/actions-runner.ubuntu-20.04.dockerfile +++ b/runner/actions-runner.ubuntu-20.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.301.1 +ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable diff --git a/runner/actions-runner.ubuntu-22.04.dockerfile b/runner/actions-runner.ubuntu-22.04.dockerfile index e4c304f9d5..4150f77d9c 100644 --- a/runner/actions-runner.ubuntu-22.04.dockerfile +++ b/runner/actions-runner.ubuntu-22.04.dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM -ARG RUNNER_VERSION=2.301.1 +ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable From 90f3c85cee05db2ae4fec3a24eb6b6a48ee884e7 Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Tue, 7 Feb 2023 08:47:59 +0100 Subject: [PATCH 061/561] Add options to multi client (#2257) --- github/actions/fake/multi_client.go | 4 ++-- github/actions/multi_client.go | 14 ++++++++------ github/actions/multi_client_test.go | 26 ++++++++++++++++++++++++++ 3 files changed, 36 insertions(+), 8 deletions(-) diff --git a/github/actions/fake/multi_client.go b/github/actions/fake/multi_client.go index 95c0c6fcdf..e662510263 100644 --- a/github/actions/fake/multi_client.go +++ b/github/actions/fake/multi_client.go @@ -34,10 +34,10 @@ func NewMultiClient(opts ...MultiClientOption) actions.MultiClient { return f } -func (f *fakeMultiClient) GetClientFor(ctx context.Context, githubConfigURL string, creds actions.ActionsAuth, namespace string) (actions.ActionsService, error) { +func (f *fakeMultiClient) 
GetClientFor(ctx context.Context, githubConfigURL string, creds actions.ActionsAuth, namespace string, options ...actions.ClientOption) (actions.ActionsService, error) { return f.defaultClient, f.defaultErr } -func (f *fakeMultiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData actions.KubernetesSecretData) (actions.ActionsService, error) { +func (f *fakeMultiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData actions.KubernetesSecretData, options ...actions.ClientOption) (actions.ActionsService, error) { return f.defaultClient, f.defaultErr } diff --git a/github/actions/multi_client.go b/github/actions/multi_client.go index 1e2041398d..bfef889371 100644 --- a/github/actions/multi_client.go +++ b/github/actions/multi_client.go @@ -11,8 +11,8 @@ import ( ) type MultiClient interface { - GetClientFor(ctx context.Context, githubConfigURL string, creds ActionsAuth, namespace string) (ActionsService, error) - GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData KubernetesSecretData) (ActionsService, error) + GetClientFor(ctx context.Context, githubConfigURL string, creds ActionsAuth, namespace string, options ...ClientOption) (ActionsService, error) + GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData KubernetesSecretData, options ...ClientOption) (ActionsService, error) } type multiClient struct { @@ -52,7 +52,7 @@ func NewMultiClient(userAgent string, logger logr.Logger) MultiClient { } } -func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, creds ActionsAuth, namespace string) (ActionsService, error) { +func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, creds ActionsAuth, namespace string, options ...ClientOption) (ActionsService, error) { m.logger.Info("retrieve actions client", "githubConfigURL", githubConfigURL, "namespace", namespace) if creds.Token == 
"" && creds.AppCreds == nil { @@ -66,8 +66,10 @@ func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, client, err := NewClient( githubConfigURL, &creds, - WithUserAgent(m.userAgent), - WithLogger(m.logger), + append([]ClientOption{ + WithUserAgent(m.userAgent), + WithLogger(m.logger), + }, options...)..., ) if err != nil { return nil, err @@ -98,7 +100,7 @@ func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, type KubernetesSecretData map[string][]byte -func (m *multiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData KubernetesSecretData) (ActionsService, error) { +func (m *multiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData KubernetesSecretData, options ...ClientOption) (ActionsService, error) { if len(secretData) == 0 { return nil, fmt.Errorf("must provide secret data with either PAT or GitHub App Auth") } diff --git a/github/actions/multi_client_test.go b/github/actions/multi_client_test.go index a61686c658..80d54a3fe4 100644 --- a/github/actions/multi_client_test.go +++ b/github/actions/multi_client_test.go @@ -49,6 +49,32 @@ func TestMultiClientCaching(t *testing.T) { assert.Len(t, multiClient.clients, 2) } +func TestMultiClientOptions(t *testing.T) { + logger := logr.Discard() + ctx := context.Background() + + defaultNamespace := "default" + defaultConfigURL := "https://github.com/org/repo" + defaultCreds := &ActionsAuth{ + Token: "token", + } + + multiClient := NewMultiClient("test-user-agent", logger) + service, err := multiClient.GetClientFor( + ctx, + defaultConfigURL, + *defaultCreds, + defaultNamespace, + WithUserAgent("test-option"), + ) + require.NoError(t, err) + + client := service.(*Client) + req, err := client.NewGitHubAPIRequest(ctx, "GET", "/test", nil) + require.NoError(t, err) + assert.Equal(t, "test-option", req.Header.Get("User-Agent")) +} + func TestCreateJWT(t *testing.T) { key := `-----BEGIN RSA 
PRIVATE KEY----- MIICWgIBAAKBgHXfRT9cv9UY9fAAD4+1RshpfSSZe277urfEmPfX3/Og9zJYRk// From 3858695e42a0bbaea8ed5194bea3a830d4840b0b Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 7 Feb 2023 12:37:09 -0500 Subject: [PATCH 062/561] Remove un-required permissions for the manager-role of the new `AutoScalingRunnerSet` (#2260) --- .../templates/manager_role.yaml | 82 +------------------ .../tests/template_test.go | 2 +- config/rbac/role.yaml | 20 ----- .../autoscalingrunnerset_controller.go | 2 - .../ephemeralrunner_controller.go | 8 +- .../ephemeralrunnerset_controller.go | 2 + 6 files changed, 6 insertions(+), 110 deletions(-) diff --git a/charts/actions-runner-controller-2/templates/manager_role.yaml b/charts/actions-runner-controller-2/templates/manager_role.yaml index 6b68e603c8..34639b9a2f 100644 --- a/charts/actions-runner-controller-2/templates/manager_role.yaml +++ b/charts/actions-runner-controller-2/templates/manager_role.yaml @@ -110,14 +110,6 @@ rules: - apiGroups: - "" resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - namespaces - pods verbs: - create @@ -130,57 +122,9 @@ rules: - apiGroups: - "" resources: - - persistentvolumeclaims - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - namespaces/status - pods/status verbs: - get -- apiGroups: - - "" - resources: - - persistentvolumes - verbs: - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - pods/finalizers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - "" resources: @@ -223,28 +167,4 @@ rules: - get - update - list - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - get -- apiGroups: - - "" - resources: - - pods/log - verbs: - - get - - list - - watch -- 
apiGroups: - - "batch" - resources: - - jobs - verbs: - - get - - list - - create - - delete + - watch \ No newline at end of file diff --git a/charts/actions-runner-controller-2/tests/template_test.go b/charts/actions-runner-controller-2/tests/template_test.go index 6459fb0336..1aa2f93995 100644 --- a/charts/actions-runner-controller-2/tests/template_test.go +++ b/charts/actions-runner-controller-2/tests/template_test.go @@ -162,7 +162,7 @@ func TestTemplate_CreateManagerRole(t *testing.T) { assert.Empty(t, managerRole.Namespace, "ClusterRole should not have a namespace") assert.Equal(t, "test-arc-actions-runner-controller-2-manager-role", managerRole.Name) - assert.Equal(t, 25, len(managerRole.Rules)) + assert.Equal(t, 17, len(managerRole.Rules)) } func TestTemplate_ManagerRoleBinding(t *testing.T) { diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 29ab888173..b80a869de4 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -306,26 +306,6 @@ rules: verbs: - create - patch -- apiGroups: - - "" - resources: - - namespaces - - pods - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - namespaces/status - - pods/status - verbs: - - get - apiGroups: - "" resources: diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index db0a4f9d7b..9b14b51592 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -68,8 +68,6 @@ type AutoscalingRunnerSetReconciler struct { resourceBuilder resourceBuilder } -// +kubebuilder:rbac:groups=core,resources=namespaces;pods,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=namespaces/status;pods/status,verbs=get // 
+kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets/status,verbs=get;update;patch // +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets/finalizers,verbs=update diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go index 7ee546cb07..dd12a50f39 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller.go +++ b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -59,12 +59,8 @@ type EphemeralRunnerReconciler struct { // +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get;update;patch // +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/finalizers,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;delete -// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch -// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=create;delete;get -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;delete;get -// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;delete;get +// +kubebuilder:rbac:groups=core,resources=pods/status,verbs=get +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=create;get;list;watch;delete // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index 6744e1bad2..08ecc2daf9 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -56,6 +56,8 @@ type EphemeralRunnerSetReconciler struct { //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. From 479cc89c1b1d2940ebdd326a26aefab6045cda24 Mon Sep 17 00:00:00 2001 From: Ferenc Hammerl <31069338+fhammerl@users.noreply.github.com> Date: Wed, 8 Feb 2023 14:42:45 +0100 Subject: [PATCH 063/561] Port ADRs from internal repo (#2267) --- adrs/2022-10-17-runner-image.md | 90 +++++++++++++++++++ adrs/2022-10-27-runnerscaleset-lifetime.md | 54 +++++++++++ adrs/2022-11-04-crd-api-group-name.md | 51 +++++++++++ .../2022-12-05-adding-labels-k8s-resources.md | 80 +++++++++++++++++ ...-27-pick-the-right-runner-to-scale-down.md | 88 ++++++++++++++++++ 5 files changed, 363 insertions(+) create mode 100644 adrs/2022-10-17-runner-image.md create mode 100644 adrs/2022-10-27-runnerscaleset-lifetime.md create mode 100644 adrs/2022-11-04-crd-api-group-name.md create mode 100644 adrs/2022-12-05-adding-labels-k8s-resources.md create mode 100644 adrs/2022-12-27-pick-the-right-runner-to-scale-down.md diff --git a/adrs/2022-10-17-runner-image.md b/adrs/2022-10-17-runner-image.md new file mode 100644 index 0000000000..17b0b7e1c7 --- 
/dev/null +++ b/adrs/2022-10-17-runner-image.md @@ -0,0 +1,90 @@ +# ADR 0001: Produce the runner image for the scaleset client +**Date**: 2022-10-17 + +**Status**: Done + +# Context + +user can bring their own runner images, the contract we have are: +- It must have a runner binary under /actions-runner (/actions-runner/run.sh exists) +- The WORKDIR is set to /actions-runner +- If the user inside the container is root, the ENV RUNNER_ALLOW_RUNASROOT should be set to 1 + +The existing ARC runner images will not work with the new ARC mode out-of-box for the following reason: + +- The current runner image requires caller to pass runner configure info, ex: URL and Config Token +- The current runner image has the runner binary under /runner +- The current runner image requires a special entrypoint script in order to work around some volume mount limitation for setting up DinD. + +However, since we expose the raw runner Pod spec to our user, advanced user can modify the helm values.yaml to make everything lines up properly. + +# Guiding Principles + +- Build image is separated in two stages. + +## The first stage (build) +- Reuses the same base image, so it is faster to build. +- Installs utilities needed to download assets (runner and runner-container-hooks). +- Downloads the runner and stores it into `/actions-runner` directory. +- Downloads the runner-container-hooks and stores it into `/actions-runner/k8s` directory. +- You can use build arguments to control the runner version, the target platform and runner container hooks version. 
+ +Preview: + +```Dockerfile +FROM mcr.microsoft.com/dotnet/runtime-deps:6.0 as build + +ARG RUNNER_ARCH="x64" +ARG RUNNER_VERSION=2.298.2 +ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.3 + +RUN apt update -y && apt install curl unzip -y + +WORKDIR /actions-runner +RUN curl -f -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${RUNNER_ARCH}-${RUNNER_VERSION}.tar.gz \ + && tar xzf ./runner.tar.gz \ + && rm runner.tar.gz + +RUN curl -f -L -o runner-container-hooks.zip https://github.com/actions/runner-container-hooks/releases/download/v${RUNNER_CONTAINER_HOOKS_VERSION}/actions-runner-hooks-k8s-${RUNNER_CONTAINER_HOOKS_VERSION}.zip \ + && unzip ./runner-container-hooks.zip -d ./k8s \ + && rm runner-container-hooks.zip +``` + +## The main image: +- Copies assets from the build stage to `/actions-runner` +- Does not provide an entrypoint. The entrypoint should be set within the container definition. + +Preview: + +```Dockerfile +FROM mcr.microsoft.com/dotnet/runtime-deps:6.0 + +WORKDIR /actions-runner +COPY --from=build /actions-runner . 
+``` + +## Example of pod spec with the init container copying assets +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: +spec: + containers: + - name: runner + image: + command: ["/runner/run.sh"] + volumeMounts: + - name: runner + mountPath: /runner + initContainers: + - name: setup + image: + command: ["sh", "-c", "cp -r /actions-runner/* /runner/"] + volumeMounts: + - name: runner + mountPath: /runner + volumes: + - name: runner + emptyDir: {} +``` diff --git a/adrs/2022-10-27-runnerscaleset-lifetime.md b/adrs/2022-10-27-runnerscaleset-lifetime.md new file mode 100644 index 0000000000..a66d44a63a --- /dev/null +++ b/adrs/2022-10-27-runnerscaleset-lifetime.md @@ -0,0 +1,54 @@ +# ADR 0003: Lifetime of RunnerScaleSet on Service + +**Date**: 2022-10-27 + +**Status**: Done + +## Context + +We have created the RunnerScaleSet object and APIs around it on the GitHub Actions service for better support of any self-hosted runner auto-scale solution, like [actions-runner-controller](https://github.com/actions-runner-controller/actions-runner-controller). + +The `RunnerScaleSet` object will represent a set of homogeneous self-hosted runners to the Actions service job routing system. + +A `RunnerScaleSet` client (ARC) needs to communicate with the Actions service via HTTP long-poll in a certain protocol to get a workflow job successfully landed on one of its homogeneous self-hosted runners. + +In this ADR, I want to discuss the following within the context of actions-runner-controller's new scaling mode: +- Who and how to create a RunnerScaleSet on the service? +- Who and how to delete a RunnerScaleSet on the service? +- What will happen to all the runners and jobs when the deletion happens? + +## RunnerScaleSet creation + +- `AutoScalingRunnerSet` custom resource controller will create the `RunnerScaleSet` object in the Actions service on any `AutoScalingRunnerSet` resource deployment. 
+- The creation is via REST API on Actions service `POST _apis/runtime/runnerscalesets` +- The creation needs to use the runner registration token (admin). +- `RunnerScaleSet.Name` == `AutoScalingRunnerSet.metadata.Name` +- The created `RunnerScaleSet` will only have 1 label and it's the `RunnerScaleSet`'s name +- `AutoScalingRunnerSet` controller will store the `RunnerScaleSet.Id` as an annotation on the k8s resource for future lookup. + +## RunnerScaleSet modification + +- When the user patch existing `AutoScalingRunnerSet`'s RunnerScaleSet related properly, ex: `runnerGroupName`, `runnerWorkDir`, the controller needs to make an HTTP PATCH call to the `_apis/runtime/runnerscalesets/2` endpoint in order to update the object on the service. +- We will put the deployed `AutoScalingRunnerSet` resource in an error state when the user tries to patch the resource with a different `githubConfigUrl` +> Basically, you can't move a deployed `AutoScalingRunnerSet` across GitHub entity, repoA->repoB, repoA->OrgC, etc. +> We evaluated blocking the change before instead of erroring at runtime and that we decided not to go down this route because it forces us to re-introduce admission webhooks (require cert-manager). + +## RunnerScaleSet deletion + +- `AutoScalingRunnerSet` custom resource controller will delete the `RunnerScaleSet` object in the Actions service on any `AutoScalingRunnerSet` resource deletion. +> `AutoScalingRunnerSet` deletion will contain several steps: +> - Stop the listener app so no more new jobs coming and no more scaling up/down. +> - Request scale down to 0 +> - Force stop all runners +> - Wait for the scale down to 0 +> - Delete the `RunnerScaleSet` object from service via REST API +- The deletion is via REST API on Actions service `DELETE _apis/runtime/runnerscalesets/1` +- The deletion needs to use the runner registration token (admin). 
+ +The user's `RunnerScaleSet` will be deleted from the service by `DormantRunnerScaleSetCleanupJob` if the particular `AutoScalingRunnerSet` has not connected to the service for the past 7 days. We have a similar rule for self-hosted runners. + +## Jobs and Runners on deletion + +- `RunnerScaleSet` deletion will be blocked if there is any job assigned to a runner within the `RunnerScaleSet`, which has to scale down to 0 before deletion. +- Any job that has been assigned to the `RunnerScaleSet` but hasn't been assigned to a runner within the `RunnerScaleSet` will get thrown back to the queue and wait for assignment again. +- Any offline runners within the `RunnerScaleSet` will be deleted from the service side. diff --git a/adrs/2022-11-04-crd-api-group-name.md b/adrs/2022-11-04-crd-api-group-name.md new file mode 100644 index 0000000000..e3aabcdd79 --- /dev/null +++ b/adrs/2022-11-04-crd-api-group-name.md @@ -0,0 +1,51 @@ +# ADR 0004: Technical detail about actions-runner-controller repository transfer +**Date**: 2022-11-04 + +**Status**: Done + +# Context + +As part of ARC Private Beta: Repository Migration & Open Sourcing Process, we have decided to transfer the current [actions-runner-controller repository](https://github.com/actions-runner-controller/actions-runner-controller) into the [Actions org](https://github.com/actions). + +**Goals:** +- A clear signal that GitHub will start taking over ARC and provide support. +- Since we are going to deprecate the existing auto-scale mode in ARC at some point, we want to have a clear separation between the legacy mode (not supported) and the new mode (supported). +- Avoid disrupting users as much as we can, existing ARC users will not notice any difference after the repository transfer, they can keep upgrading to the newer version of ARC and keep using the legacy mode. 
+ +**Challenges** +- The original creator's name (`summerwind`) is all over the place, including some critical parts of ARC: + - The k8s user resource API's full name is `actions.summerwind.dev/v1alpha1/RunnerDeployment`, renaming it to `actions.github.com` is a breaking change and will force the user to rebuild their entire k8s cluster. + - All docker images around ARC (controller + default runner) is published to [dockerhub/summerwind](https://hub.docker.com/u/summerwind) +- The helm chart for ARC is currently hosted on [GitHub pages](https://actions-runner-controller.github.io/actions-runner-controller) for https://github.com/actions-runner-controller/actions-runner-controller, moving the repository means we will break users who install ARC via the helm chart + + +# Decisions + +## APIs group names for k8s custom resources, `actions.summerwind` or `actions.github` + +- We will not rename any existing ARC resources API name after moving the repository under Actions org. (keep `summerwind` for old stuff) +- For any new resource API we are going to add, those will be named properly under GitHub, ex: `actions.github.com/v1alpha1/AutoScalingRunnerSet` + +Benefits: +- A clear separation from existing ARC: + - Easy for the support engineer to triage income tickets and figure out whether we need to support the use case from the user +- We won't break existing users when they upgrade to a newer version of ARC after the repository transfer + +Based on the spike done by `@nikola-jokic`, we have confidence that we can host multiple resources with different API names under the same repository, and the published ARC controller can handle both resources properly. + +## ARC Docker images + +We will not start using the GitHub container registry for hosting ARC images (controller + runner images) right after the repository transfer. + +But over time, we will start using GHCR for hosting those images along with our deprecation story. 
+ +## Helm chart + +We will recreate the https://github.com/actions-runner-controller/actions-runner-controller repository after the repository transfer. + +The recreated repository will only contain the helm chart assets which keep powering the https://actions-runner-controller.github.io/actions-runner-controller for users to install ARC via Helm. + +Long term, we will switch to hosting the helm chart on GHCR (OCI) instead of using GitHub Pages. + +This will require a one-time change to our users by running +`helm repo remove actions-runner-controller` and `helm repo add actions-runner-controller oci://ghcr.io/actions` diff --git a/adrs/2022-12-05-adding-labels-k8s-resources.md b/adrs/2022-12-05-adding-labels-k8s-resources.md new file mode 100644 index 0000000000..1ce4246ef3 --- /dev/null +++ b/adrs/2022-12-05-adding-labels-k8s-resources.md @@ -0,0 +1,80 @@ +# ADR 0007: Adding labels to our resources + +**Date**: 2022-12-05 + +**Status**: Done + +## Context + +users need to provide us with logs so that we can help support and troubleshoot their issues. We need a way for our users to filter and retrieve the logs we need. + +## Proposal + +A good start would be a catch-all label to get all logs that are +ARC-related: one of the [recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/) +is `app.kubernetes.io/part-of` and we can set that for all ARC components +to be `actions-runner-controller`. + +Assuming standard logging that would allow us to get all ARC logs by running + +```bash +kubectl logs -l 'app.kubernetes.io/part-of=actions-runner-controller' +``` +which would be very useful for development to begin with. 
+ +The proposal is to add these sets of labels to the pods ARC creates: + +#### controller-manager +Labels to be set by the Helm chart: +```yaml +metadata: + labels: + app.kubernetes.io/part-of: actions-runner-controller + app.kubernetes.io/component: controller-manager + app.kubernetes.io/version: "x.x.x" +``` + +#### Listener +Labels to be set by controller at creation: +```yaml +metadata: + labels: + app.kubernetes.io/part-of: actions-runner-controller + app.kubernetes.io/component: runner-scale-set-listener + app.kubernetes.io/version: "x.x.x" + actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet + + # the following labels are to be extracted by the config URL + actions.github.com/enterprise: enterprise + actions.github.com/organization: organization + actions.github.com/repository: repository +``` + +#### Runner +Labels to be set by controller at creation: +```yaml +metadata: + labels: + app.kubernetes.io/part-of: actions-runner-controller + app.kubernetes.io/component: runner + app.kubernetes.io/version: "x.x.x" + actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet + actions.github.com/runner-name: runner-name + actions.github.com/runner-group-name: runner-group-name + + # the following labels are to be extracted by the config URL + actions.github.com/enterprise: enterprise + actions.github.com/organization: organization + actions.github.com/repository: repository +``` + +This would allow us to ask users: + +> Can you please send us the logs coming from pods labelled 'app.kubernetes.io/part-of=actions-runner-controller'? + +Or for example if they're having problems specifically with runners: + +> Can you please send us the logs coming from pods labelled 'app.kubernetes.io/component=runner'? + +This way users don't have to understand ARC moving parts but we still have a +way to target them specifically if we need to. 
diff --git a/adrs/2022-12-27-pick-the-right-runner-to-scale-down.md b/adrs/2022-12-27-pick-the-right-runner-to-scale-down.md new file mode 100644 index 0000000000..217925233c --- /dev/null +++ b/adrs/2022-12-27-pick-the-right-runner-to-scale-down.md @@ -0,0 +1,88 @@ +# ADR 0008: Pick the right runner to scale down +**Date**: 2022-12-27 + +**Status**: Done + +## Context + +- A custom resource `EphemeralRunnerSet` manage a set of custom resource `EphemeralRunners` +- The `EphemeralRunnerSet` has `Replicas` in its `Spec`, and the responsibility of the `EphemeralRunnerSet_controller` is to reconcile a given `EphemeralRunnerSet` to have + the same amount of `EphemeralRunners` as the `Spec.Replicas` defined. + - This means the `EphemeralRunnerSet_controller` will scale up the `EphemeralRunnerSet` by creating more `EphemeralRunner` in the case of the `Spec.Replicas` is higher than + the current amount of `EphemeralRunners`. + - This also means the `EphemeralRunnerSet_controller` will scale down the `EphemeralRunnerSet` by finding some existing `EphemeralRunner` to delete in the case of + the `Spec.Replicas` is less than the current amount of `EphemeralRunners`. + + This ADR is about how can we find the right existing `EphemeralRunner` to delete when we need to scale down. + + + ## Current approach + +1. `EphemeralRunnerSet_controller` figure out how many `EphemeralRunner` it needs to delete, ex: need to scale down from 10 to 2 means we need to delete 8 `EphemeralRunner` + +2. `EphemeralRunnerSet_controller` find all `EphemeralRunner` that is in the `Running` or `Pending` phase. + > `Pending` means the `EphemeralRunner` is still probably creating and a runner has not yet configured with the Actions service. + > `Running` means the `EphemeralRunner` is created and a runner has probably configured with Actions service, the runner may sit there idle, + > or maybe actively running a workflow job. We don't have a clear answer for it from the ARC side. 
(Actions service knows it for sure) + +3. `EphemeralRunnerSet_controller` make an HTTP DELETE request to the Actions service for each `EphemeralRunner` from the previous step and ask the Actions service to delete the runner via `RunnerId`. +(The `RunnerId` is generated after the runner registered with the Actions service, and stored on the `EphemeralRunner.Status.RunnerId`) + > - The HTTP DELETE request looks like the following: + > `DELETE https://pipelines.actions.githubusercontent.com/WoxlUxJHrKEzIp4Nz3YmrmLlZBonrmj9xCJ1lrzcJ9ZsD1Tnw7/_apis/distributedtask/pools/0/agents/1024` + > The Actions service will return 2 types of responses: + > 1. 204 (No Content): The runner with Id 1024 has been successfully removed from the service or the runner with Id 1024 doesn't exist. + > 2. 400 (Bad Request) with JSON body that contains an error message like `JobStillRunningException`: The service can't remove this runner at this point since it has been + > assigned to a job request, the client won't be able to remove the runner until the runner finishes its current assigned job request. + +4. `EphemeralRunnerSet_controller` will ignore any deletion error from runners that are still running a job, and keep trying deletion until the amount of `204` equals the amount of +`EphemeralRunner` needs to delete. + +## The problem with the current approach + +In a busy `AutoScalingRunnerSet`, the scale up and down may happen all the time as jobs are queued up and jobs finished. + +We will make way too many HTTP requests to the Actions service and ask it to try to delete a certain runner, and rely on the exception from the service to figure out what to do next. + +The runner deletion request is not cheap to the service, for synchronization, the `JobStillRunningException` is raised from the DB call for the request. + +So we are wasting resources on both the Actions service (extra load to the database) and the actions-runner-controller (useless outgoing HTTP requests). 
+ +In the test ARC that I deployed to Azure, the ARC controller tried to delete RunnerId 12408 for `bbq-beets/ting-test` a total of 35 times within 10 minutes. + +## Root cause + +The `EphemeralRunnerSet_controller` doesn't know whether a given `EphemeralRunner` is actually running a workflow job or not +(it only knows the runner is configured at the service), so it can't filter out the `EphemeralRunner`. + +## Additional context + +The legacy ARC's custom resource allows the runner image to leverage the RunnerJobHook feature to update the status of the runner custom resource in K8S (Mark the runner as running workflow run Id XXX). + +This brings a good value to users as it can provide some insight about which runner is running which job for all the runners in the cluster and it looks pretty close to what we want to fix the [root cause](#root-cause) + +However, the legacy ARC approach means the service account for running the runner pod needs to have elevated permission to update the custom resource, +this would be a big `NO` from a security point of view since we may not trust the code running inside the runner pod. + +## Possible Solution + +The nature of the k8s controller-runtime means we might reconcile the resource base on stale cache data. + +I think our goal for the solution should be: +- Reduce wasteful HTTP requests on a scale-down as much as we can. +- We can accept that we might make 1 or 2 wasteful requests to Actions service, but we can't accept making 5/10+ of them. +- See if we can meet feature parity with what the RunnerJobHook support with compromise any security concerns. + +Since the root cause of why the reconciliation can't skip an `EphemeralRunner` is that we don't know whether an `EphemeralRunner` is running a job, +a simple thought is how about we somehow attach some info to the `EphemeralRunner` to indicate it's currently running a job? 
+ +How about we send this info from the service to the auto-scaling-listener via the existing HTTP long-poll +and let the listener patch the `EphemeralRunner.Status` to indicate it's running a job? +> The listener is normally in a separate namespace with elevated permission and it's something we can trust. + +Changes: +- Introduce a new message type `JobStarted` (in addition to the existing `JobAvailable/JobAssigned/JobCompleted`) on the service side, the message is sent when a runner of the `RunnerScaleSet` get assigned to a job, + `RequestId`, `RunnerId`, and `RunnerName` will be included in the message. +- Add `RequestId (int)` to `EphemeralRunner.Status`, this will indicate which job the runner is running. +- The `AutoScalingListener` will base on the payload of this new message to patch `EphemeralRunners/RunnerName/Status` with the `RequestId` +- When `EphemeralRunnerSet_controller` try to find `EphemeralRunner` to delete on a scale down, it will skip any `EphemeralRunner` that has `EphemeralRunner.Status.RequestId` set. 
+- In the future, we can expose more info to this `JobStarted` message and introduce more property under `EphemeralRunner.Status` to reach feature parity with legacy ARC's RunnerJobHook From 4c4f82ca54d420d0e44f96037b0f1683f43b3007 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Wed, 8 Feb 2023 15:21:13 +0100 Subject: [PATCH 064/561] Early return if finalizer does not exist to make it more readable (#2262) --- .../autoscalinglistener_controller.go | 43 +++++------ .../autoscalingrunnerset_controller.go | 75 ++++++++++--------- .../ephemeralrunner_controller.go | 61 +++++++-------- .../ephemeralrunnerset_controller.go | 41 +++++----- 4 files changed, 112 insertions(+), 108 deletions(-) diff --git a/controllers/actions.github.com/autoscalinglistener_controller.go b/controllers/actions.github.com/autoscalinglistener_controller.go index 4078278f01..faf2e4e61f 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller.go +++ b/controllers/actions.github.com/autoscalinglistener_controller.go @@ -73,30 +73,31 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl. 
} if !autoscalingListener.ObjectMeta.DeletionTimestamp.IsZero() { - if controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) { - log.Info("Deleting resources") - done, err := r.cleanupResources(ctx, autoscalingListener, log) - if err != nil { - log.Error(err, "Failed to cleanup resources after deletion") - return ctrl.Result{}, err - } - if !done { - log.Info("Waiting for resources to be deleted before removing finalizer") - return ctrl.Result{}, nil - } + if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) { + return ctrl.Result{}, nil + } - log.Info("Removing finalizer") - err = patch(ctx, r.Client, autoscalingListener, func(obj *v1alpha1.AutoscalingListener) { - controllerutil.RemoveFinalizer(obj, autoscalingListenerFinalizerName) - }) - if err != nil && !kerrors.IsNotFound(err) { - log.Error(err, "Failed to remove finalizer") - return ctrl.Result{}, err - } + log.Info("Deleting resources") + done, err := r.cleanupResources(ctx, autoscalingListener, log) + if err != nil { + log.Error(err, "Failed to cleanup resources after deletion") + return ctrl.Result{}, err + } + if !done { + log.Info("Waiting for resources to be deleted before removing finalizer") + return ctrl.Result{}, nil + } - log.Info("Successfully removed finalizer after cleanup") + log.Info("Removing finalizer") + err = patch(ctx, r.Client, autoscalingListener, func(obj *v1alpha1.AutoscalingListener) { + controllerutil.RemoveFinalizer(obj, autoscalingListenerFinalizerName) + }) + if err != nil && !kerrors.IsNotFound(err) { + log.Error(err, "Failed to remove finalizer") + return ctrl.Result{}, err } - return ctrl.Result{}, nil + + log.Info("Successfully removed finalizer after cleanup") } if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) { diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go 
index 9b14b51592..b956d2815d 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -86,48 +86,49 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl } if !autoscalingRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() { - if controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) { - log.Info("Deleting resources") - done, err := r.cleanupListener(ctx, autoscalingRunnerSet, log) - if err != nil { - log.Error(err, "Failed to clean up listener") - return ctrl.Result{}, err - } - if !done { - // we are going to get notified anyway to proceed with rest of the - // cleanup. No need to re-queue - log.Info("Waiting for listener to be deleted") - return ctrl.Result{}, nil - } + if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) { + return ctrl.Result{}, nil + } - done, err = r.cleanupEphemeralRunnerSets(ctx, autoscalingRunnerSet, log) - if err != nil { - log.Error(err, "Failed to clean up ephemeral runner sets") - return ctrl.Result{}, err - } - if !done { - log.Info("Waiting for ephemeral runner sets to be deleted") - return ctrl.Result{}, nil - } + log.Info("Deleting resources") + done, err := r.cleanupListener(ctx, autoscalingRunnerSet, log) + if err != nil { + log.Error(err, "Failed to clean up listener") + return ctrl.Result{}, err + } + if !done { + // we are going to get notified anyway to proceed with rest of the + // cleanup. 
No need to re-queue + log.Info("Waiting for listener to be deleted") + return ctrl.Result{}, nil + } - err = r.deleteRunnerScaleSet(ctx, autoscalingRunnerSet, log) - if err != nil { - log.Error(err, "Failed to delete runner scale set") - return ctrl.Result{}, err - } + done, err = r.cleanupEphemeralRunnerSets(ctx, autoscalingRunnerSet, log) + if err != nil { + log.Error(err, "Failed to clean up ephemeral runner sets") + return ctrl.Result{}, err + } + if !done { + log.Info("Waiting for ephemeral runner sets to be deleted") + return ctrl.Result{}, nil + } - log.Info("Removing finalizer") - err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { - controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName) - }) - if err != nil && !kerrors.IsNotFound(err) { - log.Error(err, "Failed to update autoscaling runner set without finalizer") - return ctrl.Result{}, err - } + err = r.deleteRunnerScaleSet(ctx, autoscalingRunnerSet, log) + if err != nil { + log.Error(err, "Failed to delete runner scale set") + return ctrl.Result{}, err + } - log.Info("Successfully removed finalizer after cleanup") + log.Info("Removing finalizer") + err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { + controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName) + }) + if err != nil && !kerrors.IsNotFound(err) { + log.Error(err, "Failed to update autoscaling runner set without finalizer") + return ctrl.Result{}, err } - return ctrl.Result{}, nil + + log.Info("Successfully removed finalizer after cleanup") } if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) { diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go index dd12a50f39..f2aca7b173 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller.go +++ 
b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -76,40 +76,41 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ } if !ephemeralRunner.ObjectMeta.DeletionTimestamp.IsZero() { - if controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) { - log.Info("Finalizing ephemeral runner") - done, err := r.cleanupResources(ctx, ephemeralRunner, log) - if err != nil { - log.Error(err, "Failed to clean up ephemeral runner owned resources") - return ctrl.Result{}, err - } - if !done { - log.Info("Waiting for ephemeral runner owned resources to be deleted") - return ctrl.Result{}, nil - } + if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) { + return ctrl.Result{}, nil + } - done, err = r.cleanupContainerHooksResources(ctx, ephemeralRunner, log) - if err != nil { - log.Error(err, "Failed to clean up container hooks resources") - return ctrl.Result{}, err - } - if !done { - log.Info("Waiting for container hooks resources to be deleted") - return ctrl.Result{RequeueAfter: 5 * time.Second}, nil - } + log.Info("Finalizing ephemeral runner") + done, err := r.cleanupResources(ctx, ephemeralRunner, log) + if err != nil { + log.Error(err, "Failed to clean up ephemeral runner owned resources") + return ctrl.Result{}, err + } + if !done { + log.Info("Waiting for ephemeral runner owned resources to be deleted") + return ctrl.Result{}, nil + } - log.Info("Removing finalizer") - err = patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { - controllerutil.RemoveFinalizer(obj, ephemeralRunnerFinalizerName) - }) - if err != nil && !kerrors.IsNotFound(err) { - log.Error(err, "Failed to update ephemeral runner without the finalizer") - return ctrl.Result{}, err - } + done, err = r.cleanupContainerHooksResources(ctx, ephemeralRunner, log) + if err != nil { + log.Error(err, "Failed to clean up container hooks resources") + return ctrl.Result{}, err + } + if !done { + 
log.Info("Waiting for container hooks resources to be deleted") + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } - log.Info("Successfully removed finalizer after cleanup") - return ctrl.Result{}, nil + log.Info("Removing finalizer") + err = patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + controllerutil.RemoveFinalizer(obj, ephemeralRunnerFinalizerName) + }) + if err != nil && !kerrors.IsNotFound(err) { + log.Error(err, "Failed to update ephemeral runner without the finalizer") + return ctrl.Result{}, err } + + log.Info("Successfully removed finalizer after cleanup") return ctrl.Result{}, nil } diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index 08ecc2daf9..e1840a4efa 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -81,29 +81,30 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R // Requested deletion does not need reconciled. 
if !ephemeralRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() { - if controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) { - log.Info("Deleting resources") - done, err := r.cleanUpEphemeralRunners(ctx, ephemeralRunnerSet, log) - if err != nil { - log.Error(err, "Failed to clean up EphemeralRunners") - return ctrl.Result{}, err - } - if !done { - log.Info("Waiting for resources to be deleted") - return ctrl.Result{}, nil - } - - log.Info("Removing finalizer") - if err := patch(ctx, r.Client, ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { - controllerutil.RemoveFinalizer(obj, ephemeralRunnerSetFinalizerName) - }); err != nil && !kerrors.IsNotFound(err) { - log.Error(err, "Failed to update ephemeral runner set with removed finalizer") - return ctrl.Result{}, err - } + if !controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) { + return ctrl.Result{}, nil + } - log.Info("Successfully removed finalizer after cleanup") + log.Info("Deleting resources") + done, err := r.cleanUpEphemeralRunners(ctx, ephemeralRunnerSet, log) + if err != nil { + log.Error(err, "Failed to clean up EphemeralRunners") + return ctrl.Result{}, err + } + if !done { + log.Info("Waiting for resources to be deleted") return ctrl.Result{}, nil } + + log.Info("Removing finalizer") + if err := patch(ctx, r.Client, ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { + controllerutil.RemoveFinalizer(obj, ephemeralRunnerSetFinalizerName) + }); err != nil && !kerrors.IsNotFound(err) { + log.Error(err, "Failed to update ephemeral runner set with removed finalizer") + return ctrl.Result{}, err + } + + log.Info("Successfully removed finalizer after cleanup") return ctrl.Result{}, nil } From bc29601ae0e3c6ddba89de253b90f3e61d1c18b8 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Sun, 12 Feb 2023 01:55:12 +0100 Subject: [PATCH 065/561] EphemeralRunner: On cleanup, if pod is pending, delete from service (#2255) 
Co-authored-by: Tingluo Huang --- .../ephemeralrunner_controller.go | 63 ++++++++++++++++++- .../ephemeralrunner_controller_test.go | 12 ++-- 2 files changed, 68 insertions(+), 7 deletions(-) diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go index f2aca7b173..e6bfc9cb11 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller.go +++ b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -43,7 +43,8 @@ const ( // It represents the name of the container running the self-hosted runner image. EphemeralRunnerContainerName = "runner" - ephemeralRunnerFinalizerName = "ephemeralrunner.actions.github.com/finalizer" + ephemeralRunnerFinalizerName = "ephemeralrunner.actions.github.com/finalizer" + ephemeralRunnerActionsFinalizerName = "ephemeralrunner.actions.github.com/runner-registration-finalizer" ) // EphemeralRunnerReconciler reconciles a EphemeralRunner object @@ -80,6 +81,24 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, nil } + if controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) { + switch ephemeralRunner.Status.Phase { + case corev1.PodSucceeded: + // deleted by the runner set, we can just remove finalizer without API calls + err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + controllerutil.RemoveFinalizer(obj, ephemeralRunnerActionsFinalizerName) + }) + if err != nil { + log.Error(err, "Failed to update ephemeral runner without runner registration finalizer") + return ctrl.Result{}, err + } + log.Info("Successfully removed runner registration finalizer") + return ctrl.Result{}, nil + default: + return r.cleanupRunnerFromService(ctx, ephemeralRunner, log) + } + } + log.Info("Finalizing ephemeral runner") done, err := r.cleanupResources(ctx, ephemeralRunner, log) if err != nil { @@ -114,6 +133,19 @@ func (r 
*EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, nil } + if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) { + log.Info("Adding runner registration finalizer") + err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + controllerutil.AddFinalizer(obj, ephemeralRunnerActionsFinalizerName) + }) + if err != nil { + log.Error(err, "Failed to update with runner registration finalizer set") + return ctrl.Result{}, err + } + + log.Info("Successfully added runner registration finalizer") + } + if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) { log.Info("Adding finalizer") if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { @@ -236,6 +268,33 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ } } +func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) { + actionsError := &actions.ActionsError{} + err := r.deleteRunnerFromService(ctx, ephemeralRunner, log) + if err != nil { + if errors.As(err, &actionsError) && + actionsError.StatusCode == http.StatusBadRequest && + strings.Contains(actionsError.ExceptionName, "JobStillRunningException") { + log.Info("Runner is still running the job. 
Re-queue in 30 seconds") + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + log.Error(err, "Failed clean up runner from the service") + return ctrl.Result{}, err + } + + log.Info("Successfully removed runner registration from service") + err = patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + controllerutil.RemoveFinalizer(obj, ephemeralRunnerActionsFinalizerName) + }) + if err != nil { + return ctrl.Result{}, err + } + + log.Info("Successfully removed runner registration finalizer") + return ctrl.Result{}, nil +} + func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (deleted bool, err error) { log.Info("Cleaning up the runner pod") pod := new(corev1.Pod) @@ -614,7 +673,7 @@ func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, log.Info("Removing runner from the service", "runnerId", ephemeralRunner.Status.RunnerId) err = client.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)) if err != nil { - return fmt.Errorf("failed to remove runner from the service: %v", err) + return fmt.Errorf("failed to remove runner from the service: %w", err) } log.Info("Removed runner from the service", "runnerId", ephemeralRunner.Status.RunnerId) diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go index f8166abeb2..ba5d9fb201 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go @@ -153,19 +153,21 @@ var _ = Describe("EphemeralRunner", func() { created := new(v1alpha1.EphemeralRunner) // Check if finalizer is added Eventually( - func() (string, error) { + func() ([]string, error) { err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, created) if err != nil { - return "", err + return 
nil, err } if len(created.Finalizers) == 0 { - return "", nil + return nil, nil } - return created.Finalizers[0], nil + + n := len(created.Finalizers) // avoid capacity mismatch + return created.Finalizers[:n:n], nil }, timeout, interval, - ).Should(BeEquivalentTo(ephemeralRunnerFinalizerName)) + ).Should(BeEquivalentTo([]string{ephemeralRunnerActionsFinalizerName, ephemeralRunnerFinalizerName})) Eventually( func() (bool, error) { From ff7f4ebc3a4c91f9ecfffdaec2dbff2686aa4c28 Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Tue, 14 Feb 2023 11:11:46 +0000 Subject: [PATCH 066/561] Add testserver package (#2281) --- github/actions/testserver/server.go | 115 ++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 github/actions/testserver/server.go diff --git a/github/actions/testserver/server.go b/github/actions/testserver/server.go new file mode 100644 index 0000000000..49ff7073fb --- /dev/null +++ b/github/actions/testserver/server.go @@ -0,0 +1,115 @@ +package testserver + +import ( + "net/http" + "net/http/httptest" + "strings" + "time" + + "github.com/golang-jwt/jwt/v4" + "github.com/onsi/ginkgo/v2" + "github.com/stretchr/testify/require" +) + +// New returns a new httptest.Server that handles the +// authentication requests neeeded to create a new client. Any requests not +// made to the /actions/runners/registration-token or +// /actions/runner-registration endpoints will be handled by the provided +// handler. The returned server is started and will be automatically closed +// when the test ends. +// +// TODO: this uses ginkgo interface _only_ to support our current controller tests +func New(t ginkgo.GinkgoTInterface, handler http.Handler, options ...actionsServerOption) *actionsServer { + s := NewUnstarted(t, handler, options...) 
+ s.Start() + return s +} + +// TODO: this uses ginkgo interface _only_ to support our current controller tests +func NewUnstarted(t ginkgo.GinkgoTInterface, handler http.Handler, options ...actionsServerOption) *actionsServer { + s := httptest.NewUnstartedServer(handler) + server := &actionsServer{ + Server: s, + } + t.Cleanup(func() { + server.Close() + }) + + for _, option := range options { + option(server) + } + + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // handle getRunnerRegistrationToken + if strings.HasSuffix(r.URL.Path, "/runners/registration-token") { + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"token":"token"}`)) + return + } + + // handle getActionsServiceAdminConnection + if strings.HasSuffix(r.URL.Path, "/actions/runner-registration") { + if server.token == "" { + server.token = DefaultActionsToken(t) + } + + w.Write([]byte(`{"url":"` + s.URL + `/tenant/123/","token":"` + server.token + `"}`)) + return + } + + handler.ServeHTTP(w, r) + }) + + server.Config.Handler = h + + return server +} + +type actionsServerOption func(*actionsServer) + +func WithActionsToken(token string) actionsServerOption { + return func(s *actionsServer) { + s.token = token + } +} + +type actionsServer struct { + *httptest.Server + + token string +} + +func (s *actionsServer) ConfigURLForOrg(org string) string { + return s.URL + "/" + org +} + +func DefaultActionsToken(t ginkgo.GinkgoTInterface) string { + claims := &jwt.RegisteredClaims{ + IssuedAt: jwt.NewNumericDate(time.Now().Add(-10 * time.Minute)), + ExpiresAt: jwt.NewNumericDate(time.Now().Add(10 * time.Minute)), + Issuer: "123", + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) + privateKey, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(samplePrivateKey)) + require.NoError(t, err) + tokenString, err := token.SignedString(privateKey) + require.NoError(t, err) + return tokenString +} + +const samplePrivateKey = `-----BEGIN RSA PRIVATE KEY----- 
+MIICWgIBAAKBgHXfRT9cv9UY9fAAD4+1RshpfSSZe277urfEmPfX3/Og9zJYRk// +CZrJVD1CaBZDiIyQsNEzjta7r4UsqWdFOggiNN2E7ZTFQjMSaFkVgrzHqWuiaCBf +/BjbKPn4SMDmTzHvIe7Nel76hBdCaVgu6mYCW5jmuSH5qz/yR1U1J/WJAgMBAAEC +gYARWGWsSU3BYgbu5lNj5l0gKMXNmPhdAJYdbMTF0/KUu18k/XB7XSBgsre+vALt +I8r4RGKApoGif8P4aPYUyE8dqA1bh0X3Fj1TCz28qoUL5//dA+pigCRS20H7HM3C +ojoqF7+F+4F2sXmzFNd1NgY5RxFPYosTT7OnUiFuu2IisQJBALnMLe09LBnjuHXR +xxR65DDNxWPQLBjW3dL+ubLcwr7922l6ZIQsVjdeE0ItEUVRjjJ9/B/Jq9VJ/Lw4 +g9LCkkMCQQCiaM2f7nYmGivPo9hlAbq5lcGJ5CCYFfeeYzTxMqum7Mbqe4kk5lgb +X6gWd0Izg2nGdAEe/97DClO6VpKcPbpDAkBTR/JOJN1fvXMxXJaf13XxakrQMr+R +Yr6LlSInykyAz8lJvlLP7A+5QbHgN9NF/wh+GXqpxPwA3ukqdSqhjhWBAkBn6mDv +HPgR5xrzL6XM8y9TgaOlJAdK6HtYp6d/UOmN0+Butf6JUq07TphRT5tXNJVgemch +O5x/9UKfbrc+KyzbAkAo97TfFC+mZhU1N5fFelaRu4ikPxlp642KRUSkOh8GEkNf +jQ97eJWiWtDcsMUhcZgoB5ydHcFlrBIn6oBcpge5 +-----END RSA PRIVATE KEY-----` From 3c23f682eb3738b403eaf4f128a1c281af2b673a Mon Sep 17 00:00:00 2001 From: Ava Stancu Date: Tue, 14 Feb 2023 15:06:46 +0100 Subject: [PATCH 067/561] Added workflow to be triggered via rest api dispatch in e2e test (#2283) --- .../workflows/e2e-test-dispatch-workflow.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .github/workflows/e2e-test-dispatch-workflow.yaml diff --git a/.github/workflows/e2e-test-dispatch-workflow.yaml b/.github/workflows/e2e-test-dispatch-workflow.yaml new file mode 100644 index 0000000000..4e0e5d6ff1 --- /dev/null +++ b/.github/workflows/e2e-test-dispatch-workflow.yaml @@ -0,0 +1,16 @@ +name: ARC-REUSABLE-WORKFLOW +on: + workflow_dispatch: + inputs: + date_time: + description: 'Datetime for runner name uniqueness, format: %Y-%m-%d-%H-%M-%S-%3N, example: 2023-02-14-13-00-16-791' + required: true +jobs: + arc-runner-job: + strategy: + fail-fast: false + matrix: + job: [1, 2, 3] + runs-on: arc-runner-${{ inputs.date_time }} + steps: + - run: echo "Hello World!" 
>> $GITHUB_STEP_SUMMARY From 63488642d6f91b0a6158e236fce09622a5c659d5 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Wed, 15 Feb 2023 16:29:49 +0100 Subject: [PATCH 068/561] Add EKS test environment Terraform templates (#2290) Co-authored-by: Francesco Renzi --- test/platforms/aws-eks/.gitignore | 16 ++++ test/platforms/aws-eks/.terraform.lock.hcl | 104 +++++++++++++++++++++ test/platforms/aws-eks/README.md | 81 ++++++++++++++++ test/platforms/aws-eks/main.tf | 82 ++++++++++++++++ test/platforms/aws-eks/outputs.tf | 14 +++ test/platforms/aws-eks/terraform.tf | 26 ++++++ test/platforms/azure-aks/.keep | 0 test/platforms/gcp-gks/.keep | 0 8 files changed, 323 insertions(+) create mode 100644 test/platforms/aws-eks/.gitignore create mode 100644 test/platforms/aws-eks/.terraform.lock.hcl create mode 100644 test/platforms/aws-eks/README.md create mode 100644 test/platforms/aws-eks/main.tf create mode 100644 test/platforms/aws-eks/outputs.tf create mode 100644 test/platforms/aws-eks/terraform.tf create mode 100644 test/platforms/azure-aks/.keep create mode 100644 test/platforms/gcp-gks/.keep diff --git a/test/platforms/aws-eks/.gitignore b/test/platforms/aws-eks/.gitignore new file mode 100644 index 0000000000..dc0ad0e531 --- /dev/null +++ b/test/platforms/aws-eks/.gitignore @@ -0,0 +1,16 @@ +**/.terraform/* +.terraformrc +terraform.rc + +*.tfstate +*.tfstate.* +*.tfvars +*.tfvars.json + +crash.log +crash.*.log + +override.tf +override.tf.json +*_override.tf +*_override.tf.json \ No newline at end of file diff --git a/test/platforms/aws-eks/.terraform.lock.hcl b/test/platforms/aws-eks/.terraform.lock.hcl new file mode 100644 index 0000000000..9e1de9412e --- /dev/null +++ b/test/platforms/aws-eks/.terraform.lock.hcl @@ -0,0 +1,104 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/aws" { + version = "4.54.0" + constraints = ">= 3.72.0, >= 3.73.0, >= 4.47.0, ~> 4.54.0" + hashes = [ + "h1:j/L01+hlHVM2X2VrkQC2WtMZyu4ZLhDMw+HDJ7k0Y2Q=", + "zh:24358aefc06b3f38878680fe606dab2570cb58ab952750c47e90b81d3b05e606", + "zh:3fc0ef459d6bb4fbb0e4eb7b8adadddd636efa6d975be6e70de7327d83e15729", + "zh:67e765119726f47b1916316ac95c3cd32ac074b454f2a67b6127120b476bc483", + "zh:71aed1300debac24f11263a6f8a231c6432497b25e623e8f34e27121af65f523", + "zh:722043077e63713d4e458f3228be30c21fcff5b6660c6de8b96967337cdc604a", + "zh:76d67be4220b93cfaca0882f46db9a42b4ca48285a64fe304f108dde85f4d611", + "zh:81534c18d9f02648b1644a7937e7bea56e91caef13b41de121ee51168faad680", + "zh:89983ab2596846d5f3413ff1b5b9b21424c3c757a54dcc5a4604d3ac34fea1a6", + "zh:8a603ac6884de5dc51c372f641f9613aefd87059ff6e6a74b671f6864226e06f", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:b6fae6c1cda6d842406066dac7803d24a597b62da5fae33bcd50c5dae70140c2", + "zh:bc4c3b4bfb715beecf5186dfeb91173ef1a9c0b68e8c45cbeee180195bbfa37f", + "zh:c741a3fe7d085593a160e79596bd237afc9503c836abcc95fd627554cdf16ec0", + "zh:f6763e96485e1ea5b67a33bbd04042e412508b2b06946acf957fb68a314d893e", + "zh:fc7144577ea7d6e05c276b54a9f8f8609be7b4d0a128aa45f233a4b0e5cbf090", + ] +} + +provider "registry.terraform.io/hashicorp/cloudinit" { + version = "2.2.0" + constraints = ">= 2.0.0, ~> 2.2.0" + hashes = [ + "h1:tQLNREqesrdCQ/bIJnl0+yUK+XfdWzAG0wo4lp10LvM=", + "zh:76825122171f9ea2287fd27e23e80a7eb482f6491a4f41a096d77b666896ee96", + "zh:795a36dee548e30ca9c9d474af9ad6d29290e0a9816154ad38d55381cd0ab12d", + "zh:9200f02cb917fb99e44b40a68936fd60d338e4d30a718b7e2e48024a795a61b9", + "zh:a33cf255dc670c20678063aa84218e2c1b7a67d557f480d8ec0f68bc428ed472", + "zh:ba3c1b2cd0879286c1f531862c027ec04783ece81de67c9a3b97076f1ce7f58f", + "zh:bd575456394428a1a02191d2e46af0c00e41fd4f28cfe117d57b6aeb5154a0fb", + "zh:c68dd1db83d8437c36c92dc3fc11d71ced9def3483dd28c45f8640cfcd59de9a", + 
"zh:cbfe34a90852ed03cc074601527bb580a648127255c08589bc3ef4bf4f2e7e0c", + "zh:d6ffd7398c6d1f359b96f5b757e77b99b339fbb91df1b96ac974fe71bc87695c", + "zh:d9c15285f847d7a52df59e044184fb3ba1b7679fd0386291ed183782683d9517", + "zh:f7dd02f6d36844da23c9a27bb084503812c29c1aec4aba97237fec16860fdc8c", + ] +} + +provider "registry.terraform.io/hashicorp/kubernetes" { + version = "2.17.0" + constraints = ">= 2.10.0" + hashes = [ + "h1:I1L2R+OPgGSh+P6uBSycvvoyRIey/FqMwSvlJ9ccw0o=", + "zh:1cbafea8c404195d8ad2490d75dbeebef131563d3e38dec87231ceb3923a3012", + "zh:26d9584423ee77e607999b082de7d9dc3e937934aa83341e0832e7253caf4f51", + "zh:333527fc15fb43bbf1898a2f058598c596468a01d88c415627bb617878dc4d4d", + "zh:391b8c80e3115af485977d6e949d7260b7fc0b641089b884256bfd36a7077db2", + "zh:4d18ba55247486181759d60195777945bcd68e17ccd980820ca18e8a8b94aeb5", + "zh:607ae94d85d1c1ed3845bd71095daadea4b2468e16f57fa05c98eab0de6b14ae", + "zh:95c6cf22f8ef14e7a4f85e33cff5d6f11056c7880041b71d425d1b5ebbe246e7", + "zh:b077edcedb46a313b461ac1e49317872063b3871f2acbe1a50498612cefff387", + "zh:c6a7891683e44148b0c928fd4748b7abac727266ab551d679015f5fe8b72d1e6", + "zh:e5cebfdf873770c37a4304362003d3fea8d6c2fd819663ad121bc65bb81e4738", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:feb19269e7c0de473ad412b37818b48da0cc91e5c93dd4c77a72676ca97a16b1", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.4.3" + constraints = "~> 3.4.3" + hashes = [ + "h1:xZGZf18JjMS06pFa4NErzANI98qi59SEcBsOcS2P2yQ=", + "zh:41c53ba47085d8261590990f8633c8906696fa0a3c4b384ff6a7ecbf84339752", + "zh:59d98081c4475f2ad77d881c4412c5129c56214892f490adf11c7e7a5a47de9b", + "zh:686ad1ee40b812b9e016317e7f34c0d63ef837e084dea4a1f578f64a6314ad53", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:84103eae7251384c0d995f5a257c72b0096605048f757b749b7b62107a5dccb3", + "zh:8ee974b110adb78c7cd18aae82b2729e5124d8f115d484215fd5199451053de5", + 
"zh:9dd4561e3c847e45de603f17fa0c01ae14cae8c4b7b4e6423c9ef3904b308dda", + "zh:bb07bb3c2c0296beba0beec629ebc6474c70732387477a65966483b5efabdbc6", + "zh:e891339e96c9e5a888727b45b2e1bb3fcbdfe0fd7c5b4396e4695459b38c8cb1", + "zh:ea4739860c24dfeaac6c100b2a2e357106a89d18751f7693f3c31ecf6a996f8d", + "zh:f0c76ac303fd0ab59146c39bc121c5d7d86f878e9a69294e29444d4c653786f8", + "zh:f143a9a5af42b38fed328a161279906759ff39ac428ebcfe55606e05e1518b93", + ] +} + +provider "registry.terraform.io/hashicorp/tls" { + version = "4.0.4" + constraints = ">= 3.0.0, ~> 4.0.4" + hashes = [ + "h1:pe9vq86dZZKCm+8k1RhzARwENslF3SXb9ErHbQfgjXU=", + "zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55", + "zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848", + "zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be", + "zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5", + "zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe", + "zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e", + "zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48", + "zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8", + "zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60", + "zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e", + "zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} diff --git a/test/platforms/aws-eks/README.md b/test/platforms/aws-eks/README.md new file mode 100644 index 0000000000..354d1df8a6 --- /dev/null +++ b/test/platforms/aws-eks/README.md @@ -0,0 +1,81 @@ + +# Context + +Terraform templates to quickly create an EKS cluster with a managed node group. This is not a reference setup! It's a vanilla setup to be used when attempting to replicate issues and/or to test new features. + +⚠️ Do not use this setup in production. 
+ +## Pre-requisites + +- Terraform v1.3+ installed locally. +- an AWS account +- the AWS CLI v2.7.0/v1.24.0 or newer, installed and configured +- AWS IAM Authenticator +- kubectl v1.24.0 or newer + +
+ Download & Authenticate + +```bash +brew install awscli aws-iam-authenticator terraform +``` + +Configure & authenticate AWS CLI. This will vary based on your AWS account and IAM setup + +
+ +## Setup + +```bash +# Export AWS region & profile env variables +export AWS_REGION="eu-west-2" # Replace with your region +export AWS_PROFILE="actions-compute" # Replace with your profile +``` + +```bash +# You're free to use terraform cloud but you need to update main.tf first +terraform init +``` + +```bash +# Run terraform plan +terraform plan +``` + +```bash +# Verify the plan output from the previous step +# Run terraform apply +terraform apply +``` + +```bash +# Retrieve access credentials for the cluster and configure kubectl +aws eks --region "${AWS_REGION}" update-kubeconfig \ + --name "$(terraform output -raw cluster_name)" \ + --profile "${AWS_PROFILE}" + +# If you get this error: 'NoneType' object is not iterable +# Remove the ~/.kube/config file and try again +# https://github.com/aws/aws-cli/issues/4843 +``` + +```bash +# Verify your installation +kubectl cluster-info +``` + +Setup ARC by following [this quick-start guide](https://github.com/actions/actions-runner-controller/tree/master/docs/preview/actions-runner-controller-2). + +### Troubleshooting + +#### dial tcp: lookup api.github.com: i/o timeout + +If you see this error in the controller pod logs: + +```log +ERROR AutoscalingRunnerSet Failed to initialize Actions service client for creating a new runner scale set {"autoscalingrunnerset": "arc-runners/arc-runner-set", "error": "failed to get runner registration token: Post \"https://api.github.com/app/installations/33454774/access_tokens\": POST https://api.github.com/app/installations/33454774/access_tokens giving up after 5 attempt(s): Post \"https://api.github.com/app/installations/33454774/access_tokens\": dial tcp: lookup api.github.com: i/o timeout"} +``` + +This is because the controller pod is not able to resolve the `api.github.com` domain name. This is a good guide for [troubleshooting DNS failures in EKS](https://aws.amazon.com/premiumsupport/knowledge-center/eks-dns-failure/). 
For a fresh setup this is most likely **a security group configuration problem.** + +The controller could have allocated to a node that cannot reach coredns. You need to allow the DNS (TCP / UDP) traffic to flow between the worker nodes' security groups. \ No newline at end of file diff --git a/test/platforms/aws-eks/main.tf b/test/platforms/aws-eks/main.tf new file mode 100644 index 0000000000..7bcc85971c --- /dev/null +++ b/test/platforms/aws-eks/main.tf @@ -0,0 +1,82 @@ +provider "aws" {} + +data "aws_availability_zones" "available" {} + +locals { + cluster_name = "arc-e2etests-eks-${random_string.suffix.result}" +} + +resource "random_string" "suffix" { + length = 8 + special = false +} + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "3.19.0" + + name = "arc-e2etests-vpc" + + cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + + private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] + + enable_nat_gateway = true + single_nat_gateway = true + enable_dns_hostnames = true + + public_subnet_tags = { + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + + tags = { + # Critical: GitHub specific tag + "catalog_service" = "actions-runner-controller" + } +} + +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = "19.5.1" + + cluster_name = local.cluster_name + cluster_version = "1.24" + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + cluster_endpoint_public_access = true + + tags = { + # Critical: GitHub specific tag + # If removed, EC2 instance creation will fail + "catalog_service" = "actions-runner-controller" + } + + eks_managed_node_group_defaults = { + ami_type = "AL2_x86_64" + } + + eks_managed_node_groups = { + default 
= { + use_custom_launch_template = false + } + + primary = { + name = "primary-node-group" + + instance_types = ["t3.small"] + + min_size = 1 + max_size = 3 + desired_size = 2 + } + } +} \ No newline at end of file diff --git a/test/platforms/aws-eks/outputs.tf b/test/platforms/aws-eks/outputs.tf new file mode 100644 index 0000000000..61e5427251 --- /dev/null +++ b/test/platforms/aws-eks/outputs.tf @@ -0,0 +1,14 @@ +output "cluster_endpoint" { + description = "Endpoint for EKS control plane" + value = module.eks.cluster_endpoint +} + +output "cluster_security_group_id" { + description = "Security group ids" + value = module.eks.cluster_security_group_id +} + +output "cluster_name" { + description = "Cluster Name" + value = module.eks.cluster_name +} \ No newline at end of file diff --git a/test/platforms/aws-eks/terraform.tf b/test/platforms/aws-eks/terraform.tf new file mode 100644 index 0000000000..0328621a7a --- /dev/null +++ b/test/platforms/aws-eks/terraform.tf @@ -0,0 +1,26 @@ +terraform { + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 4.54.0" + } + + random = { + source = "hashicorp/random" + version = "~> 3.4.3" + } + + tls = { + source = "hashicorp/tls" + version = "~> 4.0.4" + } + + cloudinit = { + source = "hashicorp/cloudinit" + version = "~> 2.2.0" + } + } + + required_version = "~> 1.3" +} diff --git a/test/platforms/azure-aks/.keep b/test/platforms/azure-aks/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/platforms/gcp-gks/.keep b/test/platforms/gcp-gks/.keep new file mode 100644 index 0000000000..e69de29bb2 From 369c5e432901e0de5e69a662d08b2cc11538fbcc Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Wed, 15 Feb 2023 14:29:52 -0500 Subject: [PATCH 069/561] Fix helm chart when containerMode.type=dind. 
(#2291) --- .../templates/_helpers.tpl | 13 +++++----- .../tests/template_test.go | 26 +++++++++++++++++++ 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/charts/auto-scaling-runner-set/templates/_helpers.tpl b/charts/auto-scaling-runner-set/templates/_helpers.tpl index d4ca939fad..2ec151f683 100644 --- a/charts/auto-scaling-runner-set/templates/_helpers.tpl +++ b/charts/auto-scaling-runner-set/templates/_helpers.tpl @@ -208,18 +208,18 @@ env: {{- end }} {{- end }} {{- end }} - {{- if $setDockerHost }} + {{- end }} + {{- if $setDockerHost }} - name: DOCKER_HOST value: tcp://localhost:2376 - {{- end }} - {{- if $setDockerTlsVerify }} + {{- end }} + {{- if $setDockerTlsVerify }} - name: DOCKER_TLS_VERIFY value: "1" - {{- end }} - {{- if $setDockerCertPath }} + {{- end }} + {{- if $setDockerCertPath }} - name: DOCKER_CERT_PATH value: /certs/client - {{- end }} {{- end }} {{- $mountWork := 1 }} {{- $mountDindCert := 1 }} @@ -247,6 +247,7 @@ volumeMounts: {{- if $mountDindCert }} - name: dind-cert mountPath: /certs/client + readOnly: true {{- end }} {{- end }} {{- end }} diff --git a/charts/auto-scaling-runner-set/tests/template_test.go b/charts/auto-scaling-runner-set/tests/template_test.go index 954b8d29f9..f1f2858830 100644 --- a/charts/auto-scaling-runner-set/tests/template_test.go +++ b/charts/auto-scaling-runner-set/tests/template_test.go @@ -594,9 +594,35 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 container") assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name) assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image) + assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 3, "The runner container should have 3 env vars, DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH") + assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name) + assert.Equal(t, 
"tcp://localhost:2376", ars.Spec.Template.Spec.Containers[0].Env[0].Value) + assert.Equal(t, "DOCKER_TLS_VERIFY", ars.Spec.Template.Spec.Containers[0].Env[1].Name) + assert.Equal(t, "1", ars.Spec.Template.Spec.Containers[0].Env[1].Value) + assert.Equal(t, "DOCKER_CERT_PATH", ars.Spec.Template.Spec.Containers[0].Env[2].Name) + assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[0].Env[2].Value) + + assert.Len(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, 2, "The runner container should have 2 volume mounts, dind-cert and work") + assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name) + assert.Equal(t, "/actions-runner/_work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath) + assert.False(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].ReadOnly) + + assert.Equal(t, "dind-cert", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name) + assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath) + assert.True(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].ReadOnly) assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name) assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image) + assert.True(t, *ars.Spec.Template.Spec.Containers[1].SecurityContext.Privileged) + assert.Len(t, ars.Spec.Template.Spec.Containers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-cert, work and externals") + assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].Name) + assert.Equal(t, "/actions-runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath) + + assert.Equal(t, "dind-cert", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name) + assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath) + + assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name) + assert.Equal(t, 
"/actions-runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath) } func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) { From 14d4bb2acde67a9f485fb2f117af4c73ef0d2379 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Thu, 16 Feb 2023 07:09:23 +0900 Subject: [PATCH 070/561] doc: Fix chart name for helm commands in docs (#2287) --- TROUBLESHOOTING.md | 2 +- docs/about-arc.md | 2 +- docs/authenticating-to-the-github-api.md | 4 ++-- docs/automatically-scaling-runners.md | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md index e87ccf461d..3dde50db11 100644 --- a/TROUBLESHOOTING.md +++ b/TROUBLESHOOTING.md @@ -67,7 +67,7 @@ To fix this, you may either: # With helm, you'd set `webhookPort` to the port number of your choice # See https://github.com/actions/actions-runner-controller/pull/1410/files for more information helm upgrade --install --namespace actions-runner-system --create-namespace \ - --wait actions-runner-controller actions/actions-runner-controller \ + --wait actions-runner-controller actions-runner-controller/actions-runner-controller \ --set webhookPort=10250 ``` diff --git a/docs/about-arc.md b/docs/about-arc.md index 532d29f006..01e7b0775c 100644 --- a/docs/about-arc.md +++ b/docs/about-arc.md @@ -44,7 +44,7 @@ The helm command (in the QuickStart guide) installs the custom resources into th ```console helm install -f custom-values.yaml --wait --namespace actions-runner-system \ --create-namespace actions-runner-controller \ - actions/actions-runner-controller + actions-runner-controller/actions-runner-controller ``` ### Runner deployment diff --git a/docs/authenticating-to-the-github-api.md b/docs/authenticating-to-the-github-api.md index 9a5930ec04..80dcd301b0 100644 --- a/docs/authenticating-to-the-github-api.md +++ b/docs/authenticating-to-the-github-api.md @@ -160,7 +160,7 @@ Set the Helm chart values as follows: ```shell $ 
CA_BUNDLE=$(cat path/to/ca.pem | base64) -$ helm upgrade --install actions/actions-runner-controller \ +$ helm upgrade --install actions-runner-controller/actions-runner-controller \ certManagerEnabled=false \ admissionWebHooks.caBundle=${CA_BUNDLE} ``` @@ -170,7 +170,7 @@ $ helm upgrade --install actions/actions-runner-controller \ Set the Helm chart values as follows: ```shell -$ helm upgrade --install actions/actions-runner-controller \ +$ helm upgrade --install actions-runner-controller/actions-runner-controller \ certManagerEnabled=false ``` diff --git a/docs/automatically-scaling-runners.md b/docs/automatically-scaling-runners.md index 7c9c057a90..747800da3f 100644 --- a/docs/automatically-scaling-runners.md +++ b/docs/automatically-scaling-runners.md @@ -260,7 +260,7 @@ _[see the values documentation for all configuration options](../charts/actions- ```console $ helm upgrade --install --namespace actions-runner-system --create-namespace \ - --wait actions-runner-controller actions/actions-runner-controller \ + --wait actions-runner-controller actions-runner-controller/actions-runner-controller \ --set "githubWebhookServer.enabled=true,service.type=NodePort,githubWebhookServer.ports[0].nodePort=33080" ``` @@ -282,7 +282,7 @@ If you plan to expose ARC via Ingress, you might not be required to make it a `N ```console $ helm upgrade --install --namespace actions-runner-system --create-namespace \ - --wait actions-runner-controller actions/actions-runner-controller \ + --wait actions-runner-controller actions-runner-controller/actions-runner-controller \ --set "githubWebhookServer.enabled=true" ``` From 9a8c5670e0070eaf548900e81a3a10d541a93166 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Wed, 15 Feb 2023 17:29:56 -0500 Subject: [PATCH 071/561] Ask runner to wait for docker daemon from DinD. 
(#2292) --- charts/auto-scaling-runner-set/templates/_helpers.tpl | 8 ++++++++ charts/auto-scaling-runner-set/tests/template_test.go | 4 +++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/charts/auto-scaling-runner-set/templates/_helpers.tpl b/charts/auto-scaling-runner-set/templates/_helpers.tpl index 2ec151f683..3b51df2a80 100644 --- a/charts/auto-scaling-runner-set/templates/_helpers.tpl +++ b/charts/auto-scaling-runner-set/templates/_helpers.tpl @@ -189,6 +189,7 @@ volumeMounts: {{- $setDockerHost := 1 }} {{- $setDockerTlsVerify := 1 }} {{- $setDockerCertPath := 1 }} + {{- $setRunnerWaitDocker := 1 }} env: {{- with $container.env }} {{- range $i, $env := . }} @@ -201,6 +202,9 @@ env: {{- if eq $env.name "DOCKER_CERT_PATH" }} {{- $setDockerCertPath = 0 -}} {{- end }} + {{- if eq $env.name "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS" }} + {{- $setRunnerWaitDocker = 0 -}} + {{- end }} - name: {{ $env.name }} {{- range $envKey, $envVal := $env }} {{- if ne $envKey "name" }} @@ -221,6 +225,10 @@ env: - name: DOCKER_CERT_PATH value: /certs/client {{- end }} + {{- if $setRunnerWaitDocker }} + - name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS + value: "120" + {{- end }} {{- $mountWork := 1 }} {{- $mountDindCert := 1 }} volumeMounts: diff --git a/charts/auto-scaling-runner-set/tests/template_test.go b/charts/auto-scaling-runner-set/tests/template_test.go index f1f2858830..89439b0f72 100644 --- a/charts/auto-scaling-runner-set/tests/template_test.go +++ b/charts/auto-scaling-runner-set/tests/template_test.go @@ -594,13 +594,15 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 container") assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name) assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image) - assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 3, "The runner container should have 3 env vars, 
DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH") + assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 4, "The runner container should have 4 env vars, DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS") assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name) assert.Equal(t, "tcp://localhost:2376", ars.Spec.Template.Spec.Containers[0].Env[0].Value) assert.Equal(t, "DOCKER_TLS_VERIFY", ars.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "1", ars.Spec.Template.Spec.Containers[0].Env[1].Value) assert.Equal(t, "DOCKER_CERT_PATH", ars.Spec.Template.Spec.Containers[0].Env[2].Name) assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[0].Env[2].Value) + assert.Equal(t, "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS", ars.Spec.Template.Spec.Containers[0].Env[3].Name) + assert.Equal(t, "120", ars.Spec.Template.Spec.Containers[0].Env[3].Value) assert.Len(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, 2, "The runner container should have 2 volume mounts, dind-cert and work") assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name) From e5094cb18320878be3639bbcb731296e310e1195 Mon Sep 17 00:00:00 2001 From: Ava Stancu Date: Fri, 17 Feb 2023 13:16:20 +0100 Subject: [PATCH 072/561] Added ability to configure log level from chart values (#2252) --- .../templates/deployment.yaml | 3 +++ .../tests/template_test.go | 12 ++++++++---- charts/actions-runner-controller-2/values.yaml | 7 ++++++- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/charts/actions-runner-controller-2/templates/deployment.yaml b/charts/actions-runner-controller-2/templates/deployment.yaml index 37185152bc..0813f355ce 100644 --- a/charts/actions-runner-controller-2/templates/deployment.yaml +++ b/charts/actions-runner-controller-2/templates/deployment.yaml @@ -48,6 +48,9 @@ spec: {{- with .Values.imagePullSecrets }} - "--auto-scaler-image-pull-secrets={{ include 
"actions-runner-controller-2.imagePullSecretsNames" . }}" {{- end }} + {{- with .Values.flags.logLevel }} + - "--log-level={{ . }}" + {{- end }} command: - "/manager" env: diff --git a/charts/actions-runner-controller-2/tests/template_test.go b/charts/actions-runner-controller-2/tests/template_test.go index 1aa2f93995..309304a61f 100644 --- a/charts/actions-runner-controller-2/tests/template_test.go +++ b/charts/actions-runner-controller-2/tests/template_test.go @@ -255,8 +255,9 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 1) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) + assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2) assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) @@ -361,9 +362,10 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1]) + assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2) assert.Equal(t, 
"CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) @@ -471,10 +473,11 @@ func TestTemplate_EnableLeaderElection(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--enable-leader-election", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--leader-election-id=test-arc-actions-runner-controller-2", deployment.Spec.Template.Spec.Containers[0].Args[2]) + assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[3]) } func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) { @@ -502,7 +505,8 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) { assert.Equal(t, namespaceName, deployment.Namespace) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1]) + assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) } diff --git a/charts/actions-runner-controller-2/values.yaml b/charts/actions-runner-controller-2/values.yaml index cc139655e1..2dfaa27393 100644 --- a/charts/actions-runner-controller-2/values.yaml +++ b/charts/actions-runner-controller-2/values.yaml @@ -62,4 +62,9 @@ affinity: {} # Leverage a PriorityClass to ensure your pods survive resource shortages # ref: 
https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ # PriorityClass: system-cluster-critical -priorityClassName: "" \ No newline at end of file +priorityClassName: "" + +flags: + # Log level can be set here with one of the following values: "debug", "info", "warn", "error". + # Defaults to "debug". + logLevel: "debug" From d2edb32d43a8b987578660f05562a2c85e05f5e4 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Tue, 21 Feb 2023 08:18:59 +0900 Subject: [PATCH 073/561] Fix manager crashloopback for ARC deployments without scaleset-related controllers (#2293) --- main.go | 126 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 68 insertions(+), 58 deletions(-) diff --git a/main.go b/main.go index 754c1cbbe3..6cdd4ef744 100644 --- a/main.go +++ b/main.go @@ -317,71 +317,81 @@ func main() { } } + // We use this environment avariable to turn on the ScaleSet related controllers. + // Otherwise ARC's legacy chart is unable to deploy a working ARC controller-manager pod, + // due to that the chart does not contain new actions.* CRDs while ARC requires those CRDs. + // + // We might have used a more explicitly named environment variable for this, + // e.g. "CONTROLLER_MANAGER_ENABLE_SCALE_SET" to explicitly enable the new controllers, + // or "CONTROLLER_MANAGER_DISABLE_SCALE_SET" to explicitly disable the new controllers. + // However, doing so would affect either private ARC testers or current ARC users + // who run ARC without those variabls. 
mgrPodName := os.Getenv("CONTROLLER_MANAGER_POD_NAME") - mgrPodNamespace := os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE") - var mgrPod corev1.Pod - err = mgr.GetAPIReader().Get(context.Background(), types.NamespacedName{Namespace: mgrPodNamespace, Name: mgrPodName}, &mgrPod) - if err != nil { - log.Error(err, fmt.Sprintf("unable to obtain manager pod: %s (%s)", mgrPodName, mgrPodNamespace)) - os.Exit(1) - } - - var mgrContainer *corev1.Container - for _, container := range mgrPod.Spec.Containers { - if container.Name == "manager" { - mgrContainer = &container - break + if mgrPodName != "" { + mgrPodNamespace := os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE") + var mgrPod corev1.Pod + err = mgr.GetAPIReader().Get(context.Background(), types.NamespacedName{Namespace: mgrPodNamespace, Name: mgrPodName}, &mgrPod) + if err != nil { + log.Error(err, fmt.Sprintf("unable to obtain manager pod: %s (%s)", mgrPodName, mgrPodNamespace)) + os.Exit(1) } - } - if mgrContainer != nil { - log.Info("Detected manager container", "image", mgrContainer.Image) - } else { - log.Error(err, "unable to obtain manager container image") - os.Exit(1) - } + var mgrContainer *corev1.Container + for _, container := range mgrPod.Spec.Containers { + if container.Name == "manager" { + mgrContainer = &container + break + } + } - if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("AutoscalingRunnerSet"), - Scheme: mgr.GetScheme(), - ControllerNamespace: mgrPodNamespace, - DefaultRunnerScaleSetListenerImage: mgrContainer.Image, - ActionsClient: actionsMultiClient, - DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets, - }).SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet") - os.Exit(1) - } + if mgrContainer != nil { + log.Info("Detected manager container", "image", mgrContainer.Image) + } else { + log.Error(err, "unable to obtain manager container image") + 
os.Exit(1) + } + if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("AutoscalingRunnerSet"), + Scheme: mgr.GetScheme(), + ControllerNamespace: mgrPodNamespace, + DefaultRunnerScaleSetListenerImage: mgrContainer.Image, + ActionsClient: actionsMultiClient, + DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets, + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet") + os.Exit(1) + } - if err = (&actionsgithubcom.EphemeralRunnerReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("EphemeralRunner"), - Scheme: mgr.GetScheme(), - ActionsClient: actionsMultiClient, - }).SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "EphemeralRunner") - os.Exit(1) - } + if err = (&actionsgithubcom.EphemeralRunnerReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("EphemeralRunner"), + Scheme: mgr.GetScheme(), + ActionsClient: actionsMultiClient, + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "EphemeralRunner") + os.Exit(1) + } - if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("EphemeralRunnerSet"), - Scheme: mgr.GetScheme(), - ActionsClient: actionsMultiClient, - }).SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet") - os.Exit(1) - } - if err = (&actionsgithubcom.AutoscalingListenerReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("AutoscalingListener"), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "AutoscalingListener") - os.Exit(1) + if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("EphemeralRunnerSet"), + Scheme: mgr.GetScheme(), + ActionsClient: 
actionsMultiClient, + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet") + os.Exit(1) + } + if err = (&actionsgithubcom.AutoscalingListenerReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("AutoscalingListener"), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "AutoscalingListener") + os.Exit(1) + } + // +kubebuilder:scaffold:builder } - // +kubebuilder:scaffold:builder if !disableAdmissionWebhook && !autoScalingRunnerSetOnly { injector := &actionssummerwindnet.PodRunnerTokenInjector{ From 3c6bc0e73f4a7e5f5a9d0a192832cb6cbc22eb19 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Mon, 20 Feb 2023 18:27:14 -0500 Subject: [PATCH 074/561] Use DOCKER_IMAGE_NAME instead of NAME to avoid conflict. (#2303) --- Makefile | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 4627ec322c..46c3a5e3e8 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,9 @@ ifdef DOCKER_USER - NAME ?= ${DOCKER_USER}/actions-runner-controller + DOCKER_IMAGE_NAME ?= ${DOCKER_USER}/actions-runner-controller else - NAME ?= summerwind/actions-runner-controller + DOCKER_IMAGE_NAME ?= summerwind/actions-runner-controller endif -DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1) +DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1) VERSION ?= dev RUNNER_VERSION ?= 2.301.1 TARGETPLATFORM ?= $(shell arch) @@ -102,7 +102,7 @@ uninstall: manifests # Deploy controller in the configured Kubernetes cluster in ~/.kube/config deploy: manifests - cd config/manager && kustomize edit set image controller=${NAME}:${VERSION} + cd config/manager && kustomize edit set image controller=${DOCKER_IMAGE_NAME}:${VERSION} kustomize build config/default | kubectl apply -f - # Generate manifests e.g. CRD, RBAC etc. 
@@ -204,18 +204,18 @@ docker-buildx: --build-arg RUNNER_VERSION=${RUNNER_VERSION} \ --build-arg DOCKER_VERSION=${DOCKER_VERSION} \ --build-arg VERSION=${VERSION} \ - -t "${NAME}:${VERSION}" \ + -t "${DOCKER_IMAGE_NAME}:${VERSION}" \ -f Dockerfile \ . ${PUSH_ARG} # Push the docker image docker-push: - docker push ${NAME}:${VERSION} + docker push ${DOCKER_IMAGE_NAME}:${VERSION} docker push ${RUNNER_NAME}:${RUNNER_TAG} # Generate the release manifest file release: manifests - cd config/manager && kustomize edit set image controller=${NAME}:${VERSION} + cd config/manager && kustomize edit set image controller=${DOCKER_IMAGE_NAME}:${VERSION} mkdir -p release kustomize build config/default > release/actions-runner-controller.yaml @@ -239,7 +239,7 @@ acceptance/kind: # Otherwise `load docker-image` fail while running `docker save`. # See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap acceptance/load: - kind load docker-image ${NAME}:${VERSION} --name ${CLUSTER} + kind load docker-image ${DOCKER_IMAGE_NAME}:${VERSION} --name ${CLUSTER} kind load docker-image quay.io/brancz/kube-rbac-proxy:$(KUBE_RBAC_PROXY_VERSION) --name ${CLUSTER} kind load docker-image ${RUNNER_NAME}:${RUNNER_TAG} --name ${CLUSTER} kind load docker-image docker:dind --name ${CLUSTER} @@ -269,7 +269,7 @@ acceptance/teardown: kind delete cluster --name ${CLUSTER} acceptance/deploy: - NAME=${NAME} DOCKER_USER=${DOCKER_USER} VERSION=${VERSION} RUNNER_NAME=${RUNNER_NAME} RUNNER_TAG=${RUNNER_TAG} TEST_REPO=${TEST_REPO} \ + DOCKER_IMAGE_NAME=${DOCKER_IMAGE_NAME} DOCKER_USER=${DOCKER_USER} VERSION=${VERSION} RUNNER_NAME=${RUNNER_NAME} RUNNER_TAG=${RUNNER_TAG} TEST_REPO=${TEST_REPO} \ TEST_ORG=${TEST_ORG} TEST_ORG_REPO=${TEST_ORG_REPO} SYNC_PERIOD=${SYNC_PERIOD} \ USE_RUNNERSET=${USE_RUNNERSET} \ TEST_EPHEMERAL=${TEST_EPHEMERAL} \ From 2480e2f5b78390ef4640a81582bacfc04a9134b8 Mon Sep 17 00:00:00 2001 From: Andrei Vydrin Date: Tue, 21 Feb 2023 07:37:42 +0700 Subject: [PATCH 075/561] 
fix: case-insensitive webhook label matching (#2302) Co-authored-by: Yusuke Kuoka --- .../horizontal_runner_autoscaler_webhook.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go index 5f1f15007c..96be39873f 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "net/http" + "strings" "sync" "time" @@ -608,7 +609,7 @@ HRA: // TODO labels related to OS and architecture needs to be explicitly declared or the current implementation will not be able to find them. for _, l2 := range rs.Spec.Labels { - if l == l2 { + if strings.EqualFold(l, l2) { matched = true break } @@ -639,7 +640,7 @@ HRA: // TODO labels related to OS and architecture needs to be explicitly declared or the current implementation will not be able to find them. 
for _, l2 := range rd.Spec.Template.Spec.Labels { - if l == l2 { + if strings.EqualFold(l, l2) { matched = true break } From 4e83492d5dd442ca61f1930aa14d05f733f9171b Mon Sep 17 00:00:00 2001 From: Nathan Klick Date: Mon, 20 Feb 2023 19:56:46 -0600 Subject: [PATCH 076/561] Resolves the erroneous webhook scale down due to check runs (#2119) Signed-off-by: Nathan Klick --- .../horizontal_runner_autoscaler_webhook.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go index 96be39873f..59f7046994 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go @@ -210,9 +210,9 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons if e.GetAction() == "queued" { target.Amount = 1 break - } else if e.GetAction() == "completed" && e.GetWorkflowJob().GetConclusion() != "skipped" { - // A nagative amount is processed in the tryScale func as a scale-down request, - // that erasese the oldest CapacityReservation with the same amount. + } else if e.GetAction() == "completed" && e.GetWorkflowJob().GetConclusion() != "skipped" && e.GetWorkflowJob().GetRunnerID() > 0 { + // A negative amount is processed in the tryScale func as a scale-down request, + // that erases the oldest CapacityReservation with the same amount. // If the first CapacityReservation was with Replicas=1, this negative scale target erases that, // so that the resulting desired replicas decreases by 1. 
target.Amount = -1 From 5b3ab0de922b93dcf651d7e6ee4e7f091e284424 Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Tue, 21 Feb 2023 17:33:48 +0000 Subject: [PATCH 077/561] Add support for proxy (#2286) Co-authored-by: Nikola Jokic Co-authored-by: Tingluo Huang Co-authored-by: Ferenc Hammerl --- .../v1alpha1/autoscalinglistener_types.go | 6 +- .../v1alpha1/autoscalingrunnerset_types.go | 97 +++++- .../v1alpha1/ephemeralrunner_types.go | 3 + .../v1alpha1/proxy_config_test.go | 118 +++++++ .../v1alpha1/zz_generated.deepcopy.go | 19 +- ...tions.github.com_autoscalinglisteners.yaml | 23 ++ ...ions.github.com_autoscalingrunnersets.yaml | 12 +- .../actions.github.com_ephemeralrunners.yaml | 14 +- ...ctions.github.com_ephemeralrunnersets.yaml | 14 +- .../templates/autoscalingrunnerset.yaml | 17 + .../tests/template_test.go | 43 +++ charts/auto-scaling-runner-set/values.yaml | 14 + ...tions.github.com_autoscalinglisteners.yaml | 23 ++ ...ions.github.com_autoscalingrunnersets.yaml | 12 +- .../actions.github.com_ephemeralrunners.yaml | 14 +- ...ctions.github.com_ephemeralrunnersets.yaml | 14 +- config/manager/kustomization.yaml | 2 +- .../autoscalinglistener_controller.go | 118 ++++++- .../autoscalinglistener_controller_test.go | 235 ++++++++++++- .../autoscalingrunnerset_controller.go | 27 +- .../autoscalingrunnerset_controller_test.go | 210 ++++++++++++ controllers/actions.github.com/constants.go | 7 + .../ephemeralrunner_controller.go | 76 ++++- .../ephemeralrunner_controller_test.go | 185 ++++++++++ .../ephemeralrunnerset_controller.go | 113 ++++++- .../ephemeralrunnerset_controller_test.go | 319 ++++++++++++++++++ .../actions.github.com/resourcebuilder.go | 31 +- github/actions/client.go | 12 + github/actions/client_proxy_test.go | 39 +++ github/actions/multi_client.go | 4 +- github/actions/multi_client_test.go | 56 ++- go.mod | 8 +- go.sum | 8 + 33 files changed, 1795 insertions(+), 98 deletions(-) create mode 100644 
apis/actions.github.com/v1alpha1/proxy_config_test.go create mode 100644 github/actions/client_proxy_test.go diff --git a/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go b/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go index 68eb7664a3..e4e5c383d9 100644 --- a/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go +++ b/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go @@ -54,11 +54,13 @@ type AutoscalingListenerSpec struct { // Required ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // +optional + Proxy *ProxyConfig `json:"proxy,omitempty"` } // AutoscalingListenerStatus defines the observed state of AutoscalingListener -type AutoscalingListenerStatus struct { -} +type AutoscalingListenerStatus struct{} //+kubebuilder:object:root=true //+kubebuilder:subresource:status diff --git a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go index a842ff83f1..ba8af2fcbb 100644 --- a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go +++ b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go @@ -17,7 +17,13 @@ limitations under the License. 
package v1alpha1 import ( + "fmt" + "net/http" + "net/url" + "strings" + "github.com/actions/actions-runner-controller/hash" + "golang.org/x/net/http/httpproxy" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -80,6 +86,94 @@ type ProxyConfig struct { // +optional HTTPS *ProxyServerConfig `json:"https,omitempty"` + + // +optional + NoProxy []string `json:"noProxy,omitempty"` +} + +func (c *ProxyConfig) toHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) { + config := &httpproxy.Config{ + NoProxy: strings.Join(c.NoProxy, ","), + } + + if c.HTTP != nil { + u, err := url.Parse(c.HTTP.Url) + if err != nil { + return nil, fmt.Errorf("failed to parse proxy http url %q: %w", c.HTTP.Url, err) + } + + if c.HTTP.CredentialSecretRef != "" { + secret, err := secretFetcher(c.HTTP.CredentialSecretRef) + if err != nil { + return nil, fmt.Errorf( + "failed to get secret %s for http proxy: %w", + c.HTTP.CredentialSecretRef, + err, + ) + } + + u.User = url.UserPassword( + string(secret.Data["username"]), + string(secret.Data["password"]), + ) + } + + config.HTTPProxy = u.String() + } + + if c.HTTPS != nil { + u, err := url.Parse(c.HTTPS.Url) + if err != nil { + return nil, fmt.Errorf("failed to parse proxy https url %q: %w", c.HTTPS.Url, err) + } + + if c.HTTPS.CredentialSecretRef != "" { + secret, err := secretFetcher(c.HTTPS.CredentialSecretRef) + if err != nil { + return nil, fmt.Errorf( + "failed to get secret %s for https proxy: %w", + c.HTTPS.CredentialSecretRef, + err, + ) + } + + u.User = url.UserPassword( + string(secret.Data["username"]), + string(secret.Data["password"]), + ) + } + + config.HTTPSProxy = u.String() + } + + return config, nil +} + +func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, error)) (map[string][]byte, error) { + config, err := c.toHTTPProxyConfig(secretFetcher) + if err != nil { + return nil, err + } + + data := map[string][]byte{} + 
data["http_proxy"] = []byte(config.HTTPProxy) + data["https_proxy"] = []byte(config.HTTPSProxy) + data["no_proxy"] = []byte(config.NoProxy) + + return data, nil +} + +func (c *ProxyConfig) ProxyFunc(secretFetcher func(string) (*corev1.Secret, error)) (func(*http.Request) (*url.URL, error), error) { + config, err := c.toHTTPProxyConfig(secretFetcher) + if err != nil { + return nil, err + } + + proxyFunc := func(req *http.Request) (*url.URL, error) { + return config.ProxyFunc()(req.URL) + } + + return proxyFunc, nil } type ProxyServerConfig struct { @@ -88,9 +182,6 @@ type ProxyServerConfig struct { // +optional CredentialSecretRef string `json:"credentialSecretRef,omitempty"` - - // +optional - NoProxy []string `json:"noProxy,omitempty"` } // AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet diff --git a/apis/actions.github.com/v1alpha1/ephemeralrunner_types.go b/apis/actions.github.com/v1alpha1/ephemeralrunner_types.go index dbfe040e0c..631abde385 100644 --- a/apis/actions.github.com/v1alpha1/ephemeralrunner_types.go +++ b/apis/actions.github.com/v1alpha1/ephemeralrunner_types.go @@ -59,6 +59,9 @@ type EphemeralRunnerSpec struct { // +optional Proxy *ProxyConfig `json:"proxy,omitempty"` + // +optional + ProxySecretRef string `json:"proxySecretRef,omitempty"` + // +optional GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"` diff --git a/apis/actions.github.com/v1alpha1/proxy_config_test.go b/apis/actions.github.com/v1alpha1/proxy_config_test.go new file mode 100644 index 0000000000..9291cde4e0 --- /dev/null +++ b/apis/actions.github.com/v1alpha1/proxy_config_test.go @@ -0,0 +1,118 @@ +package v1alpha1_test + +import ( + "net/http" + "testing" + + corev1 "k8s.io/api/core/v1" + + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestProxyConfig_ToSecret(t *testing.T) { + config := 
&v1alpha1.ProxyConfig{ + HTTP: &v1alpha1.ProxyServerConfig{ + Url: "http://proxy.example.com:8080", + CredentialSecretRef: "my-secret", + }, + HTTPS: &v1alpha1.ProxyServerConfig{ + Url: "https://proxy.example.com:8080", + CredentialSecretRef: "my-secret", + }, + NoProxy: []string{ + "noproxy.example.com", + "noproxy2.example.com", + }, + } + + secretFetcher := func(string) (*corev1.Secret, error) { + return &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("username"), + "password": []byte("password"), + }, + }, nil + } + + result, err := config.ToSecretData(secretFetcher) + require.NoError(t, err) + require.NotNil(t, result) + + assert.Equal(t, "http://username:password@proxy.example.com:8080", string(result["http_proxy"])) + assert.Equal(t, "https://username:password@proxy.example.com:8080", string(result["https_proxy"])) + assert.Equal(t, "noproxy.example.com,noproxy2.example.com", string(result["no_proxy"])) +} + +func TestProxyConfig_ProxyFunc(t *testing.T) { + config := &v1alpha1.ProxyConfig{ + HTTP: &v1alpha1.ProxyServerConfig{ + Url: "http://proxy.example.com:8080", + CredentialSecretRef: "my-secret", + }, + HTTPS: &v1alpha1.ProxyServerConfig{ + Url: "https://proxy.example.com:8080", + CredentialSecretRef: "my-secret", + }, + NoProxy: []string{ + "noproxy.example.com", + "noproxy2.example.com", + }, + } + + secretFetcher := func(string) (*corev1.Secret, error) { + return &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("username"), + "password": []byte("password"), + }, + }, nil + } + + result, err := config.ProxyFunc(secretFetcher) + require.NoError(t, err) + + tests := []struct { + name string + in string + out string + }{ + { + name: "http target", + in: "http://target.com", + out: "http://username:password@proxy.example.com:8080", + }, + { + name: "https target", + in: "https://target.com", + out: "https://username:password@proxy.example.com:8080", + }, + { + name: "no proxy", + in: "https://noproxy.example.com", + out: "", 
+ }, + { + name: "no proxy 2", + in: "https://noproxy2.example.com", + out: "", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + req, err := http.NewRequest("GET", test.in, nil) + require.NoError(t, err) + u, err := result(req) + require.NoError(t, err) + + if test.out == "" { + assert.Nil(t, u) + return + } + + assert.Equal(t, test.out, u.String()) + }) + } +} diff --git a/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go b/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go index 753dd7fb3a..324707b25e 100644 --- a/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go +++ b/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go @@ -93,6 +93,11 @@ func (in *AutoscalingListenerSpec) DeepCopyInto(out *AutoscalingListenerSpec) { *out = make([]v1.LocalObjectReference, len(*in)) copy(*out, *in) } + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(ProxyConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerSpec. @@ -448,12 +453,17 @@ func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { if in.HTTP != nil { in, out := &in.HTTP, &out.HTTP *out = new(ProxyServerConfig) - (*in).DeepCopyInto(*out) + **out = **in } if in.HTTPS != nil { in, out := &in.HTTPS, &out.HTTPS *out = new(ProxyServerConfig) - (*in).DeepCopyInto(*out) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = make([]string, len(*in)) + copy(*out, *in) } } @@ -470,11 +480,6 @@ func (in *ProxyConfig) DeepCopy() *ProxyConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ProxyServerConfig) DeepCopyInto(out *ProxyServerConfig) { *out = *in - if in.NoProxy != nil { - in, out := &in.NoProxy, &out.NoProxy - *out = make([]string, len(*in)) - copy(*out, *in) - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyServerConfig. diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_autoscalinglisteners.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_autoscalinglisteners.yaml index 18946cb318..f0f3f8fb11 100644 --- a/charts/actions-runner-controller-2/crds/actions.github.com_autoscalinglisteners.yaml +++ b/charts/actions-runner-controller-2/crds/actions.github.com_autoscalinglisteners.yaml @@ -76,6 +76,29 @@ spec: description: Required minimum: 0 type: integer + proxy: + properties: + http: + properties: + credentialSecretRef: + type: string + url: + description: Required + type: string + type: object + https: + properties: + credentialSecretRef: + type: string + url: + description: Required + type: string + type: object + noProxy: + items: + type: string + type: array + type: object runnerScaleSetId: description: Required type: integer diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml index 9542f522e3..0077540675 100644 --- a/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml +++ b/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml @@ -67,10 +67,6 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string @@ -79,14 +75,14 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string type: object + noProxy: + items: + type: string + type: array type: object runnerGroup: type: string 
diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml index f321a85af1..41cdc81b45 100644 --- a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml +++ b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml @@ -94,10 +94,6 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string @@ -106,15 +102,17 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string type: object + noProxy: + items: + type: string + type: array type: object + proxySecretRef: + type: string runnerScaleSetId: type: integer spec: diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml index d4b2d35130..072cd265fb 100644 --- a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml +++ b/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml @@ -76,10 +76,6 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string @@ -88,15 +84,17 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string type: object + noProxy: + items: + type: string + type: array type: object + proxySecretRef: + type: string runnerScaleSetId: type: integer spec: diff --git a/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml b/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml index e29ec157c5..d12c886a27 100644 --- a/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml +++ 
b/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml @@ -12,6 +12,23 @@ spec: runnerGroup: {{ . }} {{- end }} + {{- if .Values.proxy }} + proxy: + {{- if .Values.proxy.http }} + http: + url: {{ .Values.proxy.http.url }} + credentialSecretRef: {{ .Values.proxy.http.credentialSecretRef }} + {{ end }} + {{- if .Values.proxy.https }} + https: + url: {{ .Values.proxy.https.url }} + credentialSecretRef: {{ .Values.proxy.https.credentialSecretRef }} + {{ end }} + {{- if and .Values.proxy.noProxy (kindIs "slice" .Values.proxy.noProxy) }} + noProxy: {{ .Values.proxy.noProxy | toYaml | nindent 6}} + {{ end }} + {{ end }} + {{- if and (or (kindIs "int64" .Values.minRunners) (kindIs "float64" .Values.minRunners)) (or (kindIs "int64" .Values.maxRunners) (kindIs "float64" .Values.maxRunners)) }} {{- if gt .Values.minRunners .Values.maxRunners }} {{- fail "maxRunners has to be greater or equal to minRunners" }} diff --git a/charts/auto-scaling-runner-set/tests/template_test.go b/charts/auto-scaling-runner-set/tests/template_test.go index 89439b0f72..96a8e894ab 100644 --- a/charts/auto-scaling-runner-set/tests/template_test.go +++ b/charts/auto-scaling-runner-set/tests/template_test.go @@ -737,3 +737,46 @@ func TestTemplateRenderedAutoScalingRunnerSet_ErrorOnEmptyPredefinedSecret(t *te assert.ErrorContains(t, err, "Values.githubConfigSecret is required for setting auth with GitHub server") } + +func TestTemplateRenderedWithProxy(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secrets", + "proxy.http.url": "http://proxy.example.com", + "proxy.http.credentialSecretRef": "http-secret", + 
"proxy.https.url": "https://proxy.example.com", + "proxy.https.credentialSecretRef": "https-secret", + "proxy.noProxy": "{example.com,example.org}", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + require.NotNil(t, ars.Spec.Proxy) + require.NotNil(t, ars.Spec.Proxy.HTTP) + assert.Equal(t, "http://proxy.example.com", ars.Spec.Proxy.HTTP.Url) + assert.Equal(t, "http-secret", ars.Spec.Proxy.HTTP.CredentialSecretRef) + + require.NotNil(t, ars.Spec.Proxy.HTTPS) + assert.Equal(t, "https://proxy.example.com", ars.Spec.Proxy.HTTPS.Url) + assert.Equal(t, "https-secret", ars.Spec.Proxy.HTTPS.CredentialSecretRef) + + require.NotNil(t, ars.Spec.Proxy.NoProxy) + require.Len(t, ars.Spec.Proxy.NoProxy, 2) + assert.Contains(t, ars.Spec.Proxy.NoProxy, "example.com") + assert.Contains(t, ars.Spec.Proxy.NoProxy, "example.org") +} diff --git a/charts/auto-scaling-runner-set/values.yaml b/charts/auto-scaling-runner-set/values.yaml index 6494ecda53..0e3b10be25 100644 --- a/charts/auto-scaling-runner-set/values.yaml +++ b/charts/auto-scaling-runner-set/values.yaml @@ -22,6 +22,20 @@ githubConfigSecret: ## > kubectl create secret generic pre-defined-secret --namespace=my_namespace --from-literal=github_app_id=123456 --from-literal=github_app_installation_id=654321 --from-literal=github_app_private_key='-----BEGIN CERTIFICATE-----*******' # githubConfigSecret: pre-defined-secret +## proxy can be used to define proxy settings that will be used by the +## controller, the listener and the runner of this scale set. 
+# +# proxy: +# http: +# url: http://proxy.com:1234 +# credentialSecretRef: proxy-auth # a secret with `username` and `password` keys +# https: +# url: http://proxy.com:1234 +# credentialSecretRef: proxy-auth # a secret with `username` and `password` keys +# noProxy: +# - example.com +# - example.org + ## maxRunners is the max number of runners the auto scaling runner set will scale up to. # maxRunners: 5 diff --git a/config/crd/bases/actions.github.com_autoscalinglisteners.yaml b/config/crd/bases/actions.github.com_autoscalinglisteners.yaml index 18946cb318..f0f3f8fb11 100644 --- a/config/crd/bases/actions.github.com_autoscalinglisteners.yaml +++ b/config/crd/bases/actions.github.com_autoscalinglisteners.yaml @@ -76,6 +76,29 @@ spec: description: Required minimum: 0 type: integer + proxy: + properties: + http: + properties: + credentialSecretRef: + type: string + url: + description: Required + type: string + type: object + https: + properties: + credentialSecretRef: + type: string + url: + description: Required + type: string + type: object + noProxy: + items: + type: string + type: array + type: object runnerScaleSetId: description: Required type: integer diff --git a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml index 9542f522e3..0077540675 100644 --- a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml +++ b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml @@ -67,10 +67,6 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string @@ -79,14 +75,14 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string type: object + noProxy: + items: + type: string + type: array type: object runnerGroup: type: string diff --git a/config/crd/bases/actions.github.com_ephemeralrunners.yaml 
b/config/crd/bases/actions.github.com_ephemeralrunners.yaml index f321a85af1..41cdc81b45 100644 --- a/config/crd/bases/actions.github.com_ephemeralrunners.yaml +++ b/config/crd/bases/actions.github.com_ephemeralrunners.yaml @@ -94,10 +94,6 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string @@ -106,15 +102,17 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string type: object + noProxy: + items: + type: string + type: array type: object + proxySecretRef: + type: string runnerScaleSetId: type: integer spec: diff --git a/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml b/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml index d4b2d35130..072cd265fb 100644 --- a/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml +++ b/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml @@ -76,10 +76,6 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string @@ -88,15 +84,17 @@ spec: properties: credentialSecretRef: type: string - noProxy: - items: - type: string - type: array url: description: Required type: string type: object + noProxy: + items: + type: string + type: array type: object + proxySecretRef: + type: string runnerScaleSetId: type: integer spec: diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index a07ac82344..e7063a8d0a 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: jokicnikola07/actions-runner-controller + newName: summerwind/actions-runner-controller newTag: dev diff --git a/controllers/actions.github.com/autoscalinglistener_controller.go 
b/controllers/actions.github.com/autoscalinglistener_controller.go index faf2e4e61f..3110a74818 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller.go +++ b/controllers/actions.github.com/autoscalinglistener_controller.go @@ -40,6 +40,7 @@ import ( ) const ( + autoscalingListenerContainerName = "autoscaler" autoscalingListenerOwnerKey = ".metadata.controller" autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer" ) @@ -202,6 +203,21 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl. return r.createRoleBindingForListener(ctx, autoscalingListener, listenerRole, serviceAccount, log) } + // Create a secret containing proxy config if specifiec + if autoscalingListener.Spec.Proxy != nil { + proxySecret := new(corev1.Secret) + if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: proxyListenerSecretName(autoscalingListener)}, proxySecret); err != nil { + if !kerrors.IsNotFound(err) { + log.Error(err, "Unable to get listener proxy secret", "namespace", autoscalingListener.Namespace, "name", proxyListenerSecretName(autoscalingListener)) + return ctrl.Result{}, err + } + + // Create a mirror secret for the listener pod in the Controller namespace for listener pod to use + log.Info("Creating a listener proxy secret for the listener pod") + return r.createProxySecret(ctx, autoscalingListener, log) + } + } + // TODO: make sure the role binding has the up-to-date role and service account listenerPod := new(corev1.Pod) @@ -307,6 +323,25 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au } logger.Info("Listener pod is deleted") + if autoscalingListener.Spec.Proxy != nil { + logger.Info("Cleaning up the listener proxy secret") + proxySecret := new(corev1.Secret) + err = r.Get(ctx, types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingListener.Namespace}, proxySecret) + switch { + case err 
== nil: + if proxySecret.ObjectMeta.DeletionTimestamp.IsZero() { + logger.Info("Deleting the listener proxy secret") + if err := r.Delete(ctx, proxySecret); err != nil { + return false, fmt.Errorf("failed to delete listener proxy secret: %v", err) + } + } + return false, nil + case err != nil && !kerrors.IsNotFound(err): + return false, fmt.Errorf("failed to get listener proxy secret: %v", err) + } + logger.Info("Listener proxy secret is deleted") + } + logger.Info("Cleaning up the listener service account") listenerSa := new(corev1.ServiceAccount) err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa) @@ -345,7 +380,49 @@ func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx cont } func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) { - newPod := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret) + var envs []corev1.EnvVar + if autoscalingListener.Spec.Proxy != nil { + httpURL := corev1.EnvVar{ + Name: "http_proxy", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: proxyListenerSecretName(autoscalingListener)}, + Key: "http_proxy", + }, + }, + } + if autoscalingListener.Spec.Proxy.HTTP != nil { + envs = append(envs, httpURL) + } + + httpsURL := corev1.EnvVar{ + Name: "https_proxy", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: proxyListenerSecretName(autoscalingListener)}, + Key: "https_proxy", + }, + }, + } + if autoscalingListener.Spec.Proxy.HTTPS != nil { + envs = append(envs, httpsURL) + } + + noProxy 
:= corev1.EnvVar{ + Name: "no_proxy", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: proxyListenerSecretName(autoscalingListener)}, + Key: "no_proxy", + }, + }, + } + if len(autoscalingListener.Spec.Proxy.NoProxy) > 0 { + envs = append(envs, noProxy) + } + } + + newPod := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret, envs...) if err := ctrl.SetControllerReference(autoscalingListener, newPod, r.Scheme); err != nil { return ctrl.Result{}, err @@ -378,6 +455,45 @@ func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Con return ctrl.Result{}, nil } +func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) { + data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) { + var secret corev1.Secret + err := r.Get(ctx, types.NamespacedName{Name: s, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, &secret) + if err != nil { + return nil, fmt.Errorf("failed to get secret %s: %w", s, err) + } + return &secret, nil + }) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to convert proxy config to secret data: %w", err) + } + + newProxySecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: proxyListenerSecretName(autoscalingListener), + Namespace: autoscalingListener.Namespace, + Labels: map[string]string{ + "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + "auto-scaling-runner-set-name": autoscalingListener.Spec.AutoscalingRunnerSetName, + }, + }, + Data: data, + } + if err := ctrl.SetControllerReference(autoscalingListener, newProxySecret, r.Scheme); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create listener proxy secret: %w", err) + } + + logger.Info("Creating listener proxy 
secret", "namespace", newProxySecret.Namespace, "name", newProxySecret.Name) + if err := r.Create(ctx, newProxySecret); err != nil { + logger.Error(err, "Unable to create listener secret", "namespace", newProxySecret.Namespace, "name", newProxySecret.Name) + return ctrl.Result{}, err + } + + logger.Info("Created listener proxy secret", "namespace", newProxySecret.Namespace, "name", newProxySecret.Name) + + return ctrl.Result{}, nil +} + func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) { dataHash := hash.ComputeTemplateHash(secret.Data) updatedMirrorSecret := mirrorSecret.DeepCopy() diff --git a/controllers/actions.github.com/autoscalinglistener_controller_test.go b/controllers/actions.github.com/autoscalinglistener_controller_test.go index 09961efd23..d5cf3280f4 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller_test.go +++ b/controllers/actions.github.com/autoscalinglistener_controller_test.go @@ -13,7 +13,9 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" ) @@ -222,7 +224,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { Context("When deleting a new AutoScalingListener", func() { It("It should cleanup all resources for a deleting AutoScalingListener before removing it", func() { - // Waiting for the pod is created + // Waiting for the pod to be created pod := new(corev1.Pod) Eventually( func() (string, error) { @@ -391,3 +393,234 @@ var _ = Describe("Test AutoScalingListener controller", func() { }) }) }) + +var _ = Describe("Test AutoScalingListener controller with proxy", func() { + var ctx context.Context + var cancel context.CancelFunc + autoscalingNS := new(corev1.Namespace) + autoscalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet) + configSecret := new(corev1.Secret) + autoscalingListener := new(actionsv1alpha1.AutoscalingListener) + + createRunnerSetAndListener := func(proxy *actionsv1alpha1.ProxyConfig) { + min := 1 + max := 10 + autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + MaxRunners: &max, + MinRunners: &min, + Proxy: proxy, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err := k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + autoscalingListener = &actionsv1alpha1.AutoscalingListener{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asl", + Namespace: autoscalingNS.Name, + }, + 
Spec: actionsv1alpha1.AutoscalingListenerSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + RunnerScaleSetId: 1, + AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace, + AutoscalingRunnerSetName: autoscalingRunnerSet.Name, + EphemeralRunnerSetName: "test-ers", + MaxRunners: 10, + MinRunners: 1, + Image: "ghcr.io/owner/repo", + Proxy: proxy, + }, + } + + err = k8sClient.Create(ctx, autoscalingListener) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingListener") + } + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.TODO()) + autoscalingNS = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-listener" + RandStringRunes(5)}, + } + + err := k8sClient.Create(ctx, autoscalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") + + configSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: autoscalingNS.Name, + }, + Data: map[string][]byte{ + "github_token": []byte(autoscalingListenerTestGitHubToken), + }, + } + + err = k8sClient.Create(ctx, configSecret) + Expect(err).NotTo(HaveOccurred(), "failed to create config secret") + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Namespace: autoscalingNS.Name, + MetricsBindAddress: "0", + }) + Expect(err).NotTo(HaveOccurred(), "failed to create manager") + + controller := &AutoscalingListenerReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + } + err = controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + go func() { + defer GinkgoRecover() + + err := mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred(), "failed to start manager") + }() + }) + + AfterEach(func() { + defer cancel() + + err := k8sClient.Delete(ctx, autoscalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") + }) + + 
It("should create a secret in the listener namespace containing proxy details, use it to populate env vars on the pod and should delete it as part of cleanup", func() { + proxyCredentials := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "proxy-credentials", + Namespace: autoscalingNS.Name, + }, + Data: map[string][]byte{ + "username": []byte("test"), + "password": []byte("password"), + }, + } + + err := k8sClient.Create(ctx, proxyCredentials) + Expect(err).NotTo(HaveOccurred(), "failed to create proxy credentials secret") + + proxy := &actionsv1alpha1.ProxyConfig{ + HTTP: &actionsv1alpha1.ProxyServerConfig{ + Url: "http://localhost:8080", + CredentialSecretRef: "proxy-credentials", + }, + HTTPS: &actionsv1alpha1.ProxyServerConfig{ + Url: "https://localhost:8443", + CredentialSecretRef: "proxy-credentials", + }, + NoProxy: []string{ + "example.com", + "example.org", + }, + } + + createRunnerSetAndListener(proxy) + + var proxySecret corev1.Secret + Eventually( + func(g Gomega) { + err := k8sClient.Get( + ctx, + types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingNS.Name}, + &proxySecret, + ) + g.Expect(err).NotTo(HaveOccurred(), "failed to get secret") + expected, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) { + var secret corev1.Secret + err := k8sClient.Get(ctx, types.NamespacedName{Name: s, Namespace: autoscalingNS.Name}, &secret) + if err != nil { + return nil, err + } + return &secret, nil + }) + g.Expect(err).NotTo(HaveOccurred(), "failed to convert proxy config to secret data") + g.Expect(proxySecret.Data).To(Equal(expected)) + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(Succeed(), "failed to create secret with proxy details") + + // wait for listener pod to be created + Eventually( + func(g Gomega) { + pod := new(corev1.Pod) + err := k8sClient.Get( + ctx, + client.ObjectKey{Name: autoscalingListener.Name, Namespace: 
autoscalingListener.Namespace}, + pod, + ) + g.Expect(err).NotTo(HaveOccurred(), "failed to get pod") + + g.Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: "http_proxy", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: proxyListenerSecretName(autoscalingListener)}, + Key: "http_proxy", + }, + }, + }), "http_proxy environment variable not found") + + g.Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: "https_proxy", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: proxyListenerSecretName(autoscalingListener)}, + Key: "https_proxy", + }, + }, + }), "https_proxy environment variable not found") + + g.Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: "no_proxy", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: proxyListenerSecretName(autoscalingListener)}, + Key: "no_proxy", + }, + }, + }), "no_proxy environment variable not found") + }, + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(Succeed(), "failed to create listener pod with proxy details") + + // Delete the AutoScalingListener + err = k8sClient.Delete(ctx, autoscalingListener) + Expect(err).NotTo(HaveOccurred(), "failed to delete test AutoScalingListener") + + Eventually( + func(g Gomega) { + var proxySecret corev1.Secret + err := k8sClient.Get( + ctx, + types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingNS.Name}, + &proxySecret, + ) + g.Expect(kerrors.IsNotFound(err)).To(BeTrue()) + }, + autoscalingListenerTestTimeout, + autoscalingListenerTestInterval).Should(Succeed(), "failed to delete secret with proxy details") + }) +}) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go 
b/controllers/actions.github.com/autoscalingrunnerset_controller.go index b956d2815d..99fa0cb567 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -42,7 +42,6 @@ import ( const ( // TODO: Replace with shared image. - name = "autoscaler" autoscalingRunnerSetOwnerKey = ".metadata.controller" LabelKeyRunnerSpecHash = "runner-spec-hash" LabelKeyAutoScaleRunnerSetName = "auto-scale-runner-set-name" @@ -495,7 +494,31 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, a return nil, fmt.Errorf("failed to find GitHub config secret: %w", err) } - return r.ActionsClient.GetClientFromSecret(ctx, autoscalingRunnerSet.Spec.GitHubConfigUrl, autoscalingRunnerSet.Namespace, configSecret.Data) + var opts []actions.ClientOption + if autoscalingRunnerSet.Spec.Proxy != nil { + proxyFunc, err := autoscalingRunnerSet.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) { + var secret corev1.Secret + err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: s}, &secret) + if err != nil { + return nil, fmt.Errorf("failed to get proxy secret %s: %w", s, err) + } + + return &secret, nil + }) + if err != nil { + return nil, fmt.Errorf("failed to get proxy func: %w", err) + } + + opts = append(opts, actions.WithProxy(proxyFunc)) + } + + return r.ActionsClient.GetClientFromSecret( + ctx, + autoscalingRunnerSet.Spec.GitHubConfigUrl, + autoscalingRunnerSet.Namespace, + configSecret.Data, + opts..., + ) } // SetupWithManager sets up the controller with the Manager. 
diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index 65bebe8cbd..4b1ac8b916 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -2,7 +2,11 @@ package actionsgithubcom import ( "context" + "encoding/base64" "fmt" + "net/http" + "net/http/httptest" + "strings" "time" corev1 "k8s.io/api/core/v1" @@ -11,13 +15,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" logf "sigs.k8s.io/controller-runtime/pkg/log" + "github.com/go-logr/logr" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions/fake" + "github.com/actions/actions-runner-controller/github/actions/testserver" ) const ( @@ -570,3 +577,206 @@ var _ = Describe("Test AutoscalingController creation failures", func() { }) }) }) + +var _ = Describe("Test Client optional configuration", func() { + Context("When specifying a proxy", func() { + var ctx context.Context + var cancel context.CancelFunc + + autoscalingNS := new(corev1.Namespace) + configSecret := new(corev1.Secret) + var mgr ctrl.Manager + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.TODO()) + autoscalingNS = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)}, + } + + err := k8sClient.Create(ctx, autoscalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") + + configSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: autoscalingNS.Name, + }, + Data: 
map[string][]byte{ + "github_token": []byte(autoscalingRunnerSetTestGitHubToken), + }, + } + + err = k8sClient.Create(ctx, configSecret) + Expect(err).NotTo(HaveOccurred(), "failed to create config secret") + + mgr, err = ctrl.NewManager(cfg, ctrl.Options{ + Namespace: autoscalingNS.Name, + }) + Expect(err).NotTo(HaveOccurred(), "failed to create manager") + + go func() { + defer GinkgoRecover() + + err := mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred(), "failed to start manager") + }() + }) + + AfterEach(func() { + defer cancel() + + err := k8sClient.Delete(ctx, autoscalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") + }) + + It("should be able to make requests to a server using a proxy", func() { + controller := &AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ControllerNamespace: autoscalingNS.Name, + DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", + ActionsClient: actions.NewMultiClient("test", logr.Discard()), + } + err := controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + serverSuccessfullyCalled := false + proxy := testserver.New(GinkgoT(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverSuccessfullyCalled = true + w.WriteHeader(http.StatusOK) + })) + + min := 1 + max := 10 + autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "http://example.com/org/repo", + GitHubConfigSecret: configSecret.Name, + MaxRunners: &max, + MinRunners: &min, + RunnerGroup: "testgroup", + Proxy: &v1alpha1.ProxyConfig{ + HTTP: &v1alpha1.ProxyServerConfig{ + Url: proxy.URL, + }, + }, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: 
"ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err = k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + // wait for server to be called + Eventually( + func() (bool, error) { + return serverSuccessfullyCalled, nil + }, + autoscalingRunnerSetTestTimeout, + 1*time.Nanosecond, + ).Should(BeTrue(), "server was not called") + }) + + It("should be able to make requests to a server using a proxy with user info", func() { + controller := &AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ControllerNamespace: autoscalingNS.Name, + DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", + ActionsClient: actions.NewMultiClient("test", logr.Discard()), + } + err := controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + serverSuccessfullyCalled := false + proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + header := r.Header.Get("Proxy-Authorization") + Expect(header).NotTo(BeEmpty()) + + header = strings.TrimPrefix(header, "Basic ") + decoded, err := base64.StdEncoding.DecodeString(header) + Expect(err).NotTo(HaveOccurred()) + Expect(string(decoded)).To(Equal("test:password")) + + serverSuccessfullyCalled = true + w.WriteHeader(http.StatusOK) + })) + GinkgoT().Cleanup(func() { + proxy.Close() + }) + + secretCredentials := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "proxy-credentials", + Namespace: autoscalingNS.Name, + }, + Data: map[string][]byte{ + "username": []byte("test"), + "password": []byte("password"), + }, + } + + err = k8sClient.Create(ctx, secretCredentials) + Expect(err).NotTo(HaveOccurred(), "failed to create secret credentials") + + min := 1 + max := 10 + autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: 
v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "http://example.com/org/repo", + GitHubConfigSecret: configSecret.Name, + MaxRunners: &max, + MinRunners: &min, + RunnerGroup: "testgroup", + Proxy: &v1alpha1.ProxyConfig{ + HTTP: &v1alpha1.ProxyServerConfig{ + Url: proxy.URL, + CredentialSecretRef: "proxy-credentials", + }, + }, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err = k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + // wait for server to be called + Eventually( + func() (bool, error) { + return serverSuccessfullyCalled, nil + }, + autoscalingRunnerSetTestTimeout, + 1*time.Nanosecond, + ).Should(BeTrue(), "server was not called") + }) + }) +}) diff --git a/controllers/actions.github.com/constants.go b/controllers/actions.github.com/constants.go index 70f39628d7..613db79c07 100644 --- a/controllers/actions.github.com/constants.go +++ b/controllers/actions.github.com/constants.go @@ -9,3 +9,10 @@ const ( EnvVarRunnerJITConfig = "ACTIONS_RUNNER_INPUT_JITCONFIG" EnvVarRunnerExtraUserAgent = "GITHUB_ACTIONS_RUNNER_EXTRA_USER_AGENT" ) + +// Environment variable names used to set proxy variables for containers +const ( + EnvVarHTTPProxy = "http_proxy" + EnvVarHTTPSProxy = "https_proxy" + EnvVarNoProxy = "no_proxy" +) diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go index e6bfc9cb11..516526a3a0 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller.go +++ b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -557,8 +557,56 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con } func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log 
logr.Logger) (ctrl.Result, error) { + var envs []corev1.EnvVar + if runner.Spec.ProxySecretRef != "" { + http := corev1.EnvVar{ + Name: "http_proxy", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: runner.Spec.ProxySecretRef, + }, + Key: "http_proxy", + }, + }, + } + if runner.Spec.Proxy.HTTP != nil { + envs = append(envs, http) + } + + https := corev1.EnvVar{ + Name: "https_proxy", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: runner.Spec.ProxySecretRef, + }, + Key: "https_proxy", + }, + }, + } + if runner.Spec.Proxy.HTTPS != nil { + envs = append(envs, https) + } + + noProxy := corev1.EnvVar{ + Name: "no_proxy", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: runner.Spec.ProxySecretRef, + }, + Key: "no_proxy", + }, + }, + } + if len(runner.Spec.Proxy.NoProxy) > 0 { + envs = append(envs, noProxy) + } + } + log.Info("Creating new pod for ephemeral runner") - newPod := r.resourceBuilder.newEphemeralRunnerPod(ctx, runner, secret) + newPod := r.resourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...) 
if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil { log.Error(err, "Failed to set controller reference to a new pod") @@ -632,7 +680,31 @@ func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner return nil, fmt.Errorf("failed to get secret: %w", err) } - return r.ActionsClient.GetClientFromSecret(ctx, runner.Spec.GitHubConfigUrl, runner.Namespace, secret.Data) + var opts []actions.ClientOption + if runner.Spec.Proxy != nil { + proxyFunc, err := runner.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) { + var secret corev1.Secret + err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: s}, &secret) + if err != nil { + return nil, fmt.Errorf("failed to get proxy secret %s: %w", s, err) + } + + return &secret, nil + }) + if err != nil { + return nil, fmt.Errorf("failed to get proxy func: %w", err) + } + + opts = append(opts, actions.WithProxy(proxyFunc)) + } + + return r.ActionsClient.GetClientFromSecret( + ctx, + runner.Spec.GitHubConfigUrl, + runner.Namespace, + secret.Data, + opts..., + ) } // runnerRegisteredWithService checks if the runner is still registered with the service diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go index ba5d9fb201..3f1747ab18 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go @@ -2,12 +2,16 @@ package actionsgithubcom import ( "context" + "encoding/base64" "fmt" "net/http" + "net/http/httptest" + "strings" "time" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/github/actions" + "github.com/go-logr/logr" "github.com/actions/actions-runner-controller/github/actions/fake" . 
"github.com/onsi/ginkgo/v2" @@ -773,4 +777,185 @@ var _ = Describe("EphemeralRunner", func() { }, timeout, interval).Should(BeEquivalentTo(corev1.PodSucceeded)) }) }) + + Describe("Pod proxy config", func() { + var ctx context.Context + var cancel context.CancelFunc + + autoScalingNS := new(corev1.Namespace) + configSecret := new(corev1.Secret) + controller := new(EphemeralRunnerReconciler) + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) + autoScalingNS = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testns-autoscaling-runner" + RandStringRunes(5), + }, + } + err := k8sClient.Create(ctx, autoScalingNS) + Expect(err).To(BeNil(), "failed to create test namespace for EphemeralRunner") + + configSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: autoScalingNS.Name, + }, + Data: map[string][]byte{ + "github_token": []byte(gh_token), + }, + } + + err = k8sClient.Create(ctx, configSecret) + Expect(err).To(BeNil(), "failed to create config secret") + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Namespace: autoScalingNS.Name, + MetricsBindAddress: "0", + }) + Expect(err).To(BeNil(), "failed to create manager") + + controller = &EphemeralRunnerReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ActionsClient: fake.NewMultiClient(), + } + + err = controller.SetupWithManager(mgr) + Expect(err).To(BeNil(), "failed to setup controller") + + go func() { + defer GinkgoRecover() + + err := mgr.Start(ctx) + Expect(err).To(BeNil(), "failed to start manager") + }() + }) + + AfterEach(func() { + defer cancel() + + err := k8sClient.Delete(ctx, autoScalingNS) + Expect(err).To(BeNil(), "failed to delete test namespace for EphemeralRunner") + }) + + It("uses an actions client with proxy transport", func() { + // Use an actual client + controller.ActionsClient = actions.NewMultiClient("test", logr.Discard()) + + proxySuccessfulllyCalled := false + proxy 
:= httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + header := r.Header.Get("Proxy-Authorization") + Expect(header).NotTo(BeEmpty()) + + header = strings.TrimPrefix(header, "Basic ") + decoded, err := base64.StdEncoding.DecodeString(header) + Expect(err).NotTo(HaveOccurred()) + Expect(string(decoded)).To(Equal("test:password")) + + proxySuccessfulllyCalled = true + w.WriteHeader(http.StatusOK) + })) + GinkgoT().Cleanup(func() { + proxy.Close() + }) + + secretCredentials := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "proxy-credentials", + Namespace: autoScalingNS.Name, + }, + Data: map[string][]byte{ + "username": []byte("test"), + "password": []byte("password"), + }, + } + + err := k8sClient.Create(ctx, secretCredentials) + Expect(err).NotTo(HaveOccurred(), "failed to create secret credentials") + + ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name) + ephemeralRunner.Spec.GitHubConfigUrl = "http://example.com/org/repo" + ephemeralRunner.Spec.Proxy = &v1alpha1.ProxyConfig{ + HTTP: &v1alpha1.ProxyServerConfig{ + Url: proxy.URL, + CredentialSecretRef: "proxy-credentials", + }, + } + + err = k8sClient.Create(ctx, ephemeralRunner) + Expect(err).To(BeNil(), "failed to create ephemeral runner") + + Eventually( + func() bool { + return proxySuccessfulllyCalled + }, + 2*time.Second, + interval, + ).Should(BeEquivalentTo(true)) + }) + + It("It should create EphemeralRunner with proxy environment variables using ProxySecretRef", func() { + ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name) + ephemeralRunner.Spec.Proxy = &v1alpha1.ProxyConfig{ + HTTP: &v1alpha1.ProxyServerConfig{ + Url: "http://proxy.example.com:8080", + }, + HTTPS: &v1alpha1.ProxyServerConfig{ + Url: "http://proxy.example.com:8080", + }, + NoProxy: []string{"example.com"}, + } + ephemeralRunner.Spec.ProxySecretRef = "proxy-secret" + err := k8sClient.Create(ctx, ephemeralRunner) + 
Expect(err).To(BeNil(), "failed to create ephemeral runner") + + pod := new(corev1.Pod) + Eventually( + func(g Gomega) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod) + g.Expect(err).To(BeNil(), "failed to get ephemeral runner pod") + }, + timeout, + interval, + ).Should(Succeed(), "failed to get ephemeral runner pod") + + Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: EnvVarHTTPProxy, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ephemeralRunner.Spec.ProxySecretRef, + }, + Key: "http_proxy", + }, + }, + })) + + Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: EnvVarHTTPSProxy, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ephemeralRunner.Spec.ProxySecretRef, + }, + Key: "https_proxy", + }, + }, + })) + + Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: EnvVarNoProxy, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ephemeralRunner.Spec.ProxySecretRef, + }, + Key: "no_proxy", + }, + }, + })) + }) + }) }) diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index e1840a4efa..29f40e0da0 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -122,6 +122,24 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, nil } + // Create proxy secret if not present + if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy != nil { + proxySecret := new(corev1.Secret) + if err := r.Get(ctx, types.NamespacedName{Namespace: 
ephemeralRunnerSet.Namespace, Name: proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet)}, proxySecret); err != nil { + if !kerrors.IsNotFound(err) { + log.Error(err, "Unable to get ephemeralRunnerSet proxy secret", "namespace", ephemeralRunnerSet.Namespace, "name", proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet)) + return ctrl.Result{}, err + } + + // Create a compiled secret for the runner pods in the runnerset namespace + log.Info("Creating a ephemeralRunnerSet proxy secret for the runner pods") + if err := r.createProxySecret(ctx, ephemeralRunnerSet, log); err != nil { + log.Error(err, "Unable to create ephemeralRunnerSet proxy secret", "namespace", ephemeralRunnerSet.Namespace, "set-name", ephemeralRunnerSet.Name) + return ctrl.Result{}, err + } + } + } + // Find all EphemeralRunner with matching namespace and own by this EphemeralRunnerSet. ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList) err := r.List( @@ -196,15 +214,39 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, nil } -func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (done bool, err error) { +func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error { + if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy == nil { + return nil + } + log.Info("Deleting proxy secret") + + proxySecret := new(corev1.Secret) + proxySecret.Namespace = ephemeralRunnerSet.Namespace + proxySecret.Name = proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet) + + if err := r.Delete(ctx, proxySecret); err != nil && !kerrors.IsNotFound(err) { + return fmt.Errorf("failed to delete proxy secret: %v", err) + } + + log.Info("Deleted proxy secret") + + return nil +} + +func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet 
*v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) { ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList) - err = r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name}) + err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name}) if err != nil { return false, fmt.Errorf("failed to list child ephemeral runners: %v", err) } + log.Info("Actual Ephemeral runner counts", "count", len(ephemeralRunnerList.Items)) // only if there are no ephemeral runners left, return true if len(ephemeralRunnerList.Items) == 0 { + err := r.cleanUpProxySecret(ctx, ephemeralRunnerSet, log) + if err != nil { + return false, err + } log.Info("All ephemeral runners are deleted") return true, nil } @@ -269,6 +311,9 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex errs := make([]error, 0) for i := 0; i < count; i++ { ephemeralRunner := r.resourceBuilder.newEphemeralRunner(runnerSet) + if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil { + ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet) + } // Make sure that we own the resource we create. if err := ctrl.SetControllerReference(runnerSet, ephemeralRunner, r.Scheme); err != nil { @@ -290,6 +335,45 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex return multierr.Combine(errs...) 
} +func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error { + proxySecretData, err := ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) { + secret := new(corev1.Secret) + err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunnerSet.Namespace, Name: s}, secret) + return secret, err + }) + if err != nil { + return fmt.Errorf("failed to convert proxy config to secret data: %w", err) + } + + runnerPodProxySecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet), + Namespace: ephemeralRunnerSet.Namespace, + Labels: map[string]string{ + // TODO: figure out autoScalingRunnerSet name and set it as a label for this secret + // "auto-scaling-runner-set-namespace": ephemeralRunnerSet.Namespace, + // "auto-scaling-runner-set-name": ephemeralRunnerSet.Name, + }, + }, + Data: proxySecretData, + } + + // Make sure that we own the resource we create. + if err := ctrl.SetControllerReference(ephemeralRunnerSet, runnerPodProxySecret, r.Scheme); err != nil { + log.Error(err, "failed to set controller reference on proxy secret") + return err + } + + log.Info("Creating new proxy secret") + if err := r.Create(ctx, runnerPodProxySecret); err != nil { + log.Error(err, "failed to create proxy secret") + return err + } + + log.Info("Created new proxy secret") + return nil +} + // deleteIdleEphemeralRunners try to deletes `count` number of v1alpha1.EphemeralRunner resources in the cluster. // It will only delete `v1alpha1.EphemeralRunner` that has registered with Actions service // which has a `v1alpha1.EphemeralRunner.Status.RunnerId` set. 
@@ -366,8 +450,31 @@ func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil { return nil, fmt.Errorf("failed to get secret: %w", err) } + var opts []actions.ClientOption + if rs.Spec.EphemeralRunnerSpec.Proxy != nil { + proxyFunc, err := rs.Spec.EphemeralRunnerSpec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) { + var secret corev1.Secret + err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: s}, &secret) + if err != nil { + return nil, fmt.Errorf("failed to get secret %s: %w", s, err) + } + + return &secret, nil + }) + if err != nil { + return nil, fmt.Errorf("failed to get proxy func: %w", err) + } + + opts = append(opts, actions.WithProxy(proxyFunc)) + } - return r.ActionsClient.GetClientFromSecret(ctx, rs.Spec.EphemeralRunnerSpec.GitHubConfigUrl, rs.Namespace, secret.Data) + return r.ActionsClient.GetClientFromSecret( + ctx, + rs.Spec.EphemeralRunnerSpec.GitHubConfigUrl, + rs.Namespace, + secret.Data, + opts..., + ) } // SetupWithManager sets up the controller with the Manager. diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index e51d91dd42..cba4eb2d5a 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -2,7 +2,11 @@ package actionsgithubcom import ( "context" + "encoding/base64" "fmt" + "net/http" + "net/http/httptest" + "strings" "time" corev1 "k8s.io/api/core/v1" @@ -11,11 +15,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" + "github.com/go-logr/logr" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions/fake" ) @@ -585,3 +592,315 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { }) }) }) + +var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func() { + var ctx context.Context + var cancel context.CancelFunc + autoscalingNS := new(corev1.Namespace) + ephemeralRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + configSecret := new(corev1.Secret) + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.TODO()) + autoscalingNS = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-runnerset" + RandStringRunes(5)}, + } + + err := k8sClient.Create(ctx, autoscalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for EphemeralRunnerSet") + + configSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: autoscalingNS.Name, + }, + Data: map[string][]byte{ + "github_token": []byte(ephemeralRunnerSetTestGitHubToken), + }, + } + + err = k8sClient.Create(ctx, configSecret) + Expect(err).NotTo(HaveOccurred(), "failed to create config secret") + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Namespace: autoscalingNS.Name, + MetricsBindAddress: "0", + }) + Expect(err).NotTo(HaveOccurred(), "failed to create manager") + + controller := &EphemeralRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ActionsClient: actions.NewMultiClient("test", logr.Discard()), + } + err = controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + go func() { + defer GinkgoRecover() + + err 
:= mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred(), "failed to start manager") + }() + }) + + AfterEach(func() { + defer cancel() + + err := k8sClient.Delete(ctx, autoscalingNS) + Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for EphemeralRunnerSet") + }) + + It("should create a proxy secret and delete the proxy secreat after the runner-set is deleted", func() { + secretCredentials := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "proxy-credentials", + Namespace: autoscalingNS.Name, + }, + Data: map[string][]byte{ + "username": []byte("username"), + "password": []byte("password"), + }, + } + + err := k8sClient.Create(ctx, secretCredentials) + Expect(err).NotTo(HaveOccurred(), "failed to create secret credentials") + + ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ + Replicas: 1, + EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ + GitHubConfigUrl: "http://example.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + RunnerScaleSetId: 100, + Proxy: &v1alpha1.ProxyConfig{ + HTTP: &v1alpha1.ProxyServerConfig{ + Url: "http://proxy.example.com", + CredentialSecretRef: secretCredentials.Name, + }, + HTTPS: &v1alpha1.ProxyServerConfig{ + Url: "https://proxy.example.com", + CredentialSecretRef: secretCredentials.Name, + }, + NoProxy: []string{"example.com", "example.org"}, + }, + PodTemplateSpec: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + }, + } + + err = k8sClient.Create(ctx, ephemeralRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet") + + Eventually(func(g Gomega) { + // Compiled / flattened proxy secret should exist at this point + actualProxySecret := &corev1.Secret{} + err = k8sClient.Get(ctx, client.ObjectKey{ + 
Namespace: autoscalingNS.Name, + Name: proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet), + }, actualProxySecret) + g.Expect(err).NotTo(HaveOccurred(), "failed to get compiled / flattened proxy secret") + + secretFetcher := func(name string) (*corev1.Secret, error) { + secret := &corev1.Secret{} + err = k8sClient.Get(ctx, client.ObjectKey{ + Namespace: autoscalingNS.Name, + Name: name, + }, secret) + return secret, err + } + + // Assert that the proxy secret is created with the correct values + expectedData, err := ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy.ToSecretData(secretFetcher) + g.Expect(err).NotTo(HaveOccurred(), "failed to get proxy secret data") + g.Expect(actualProxySecret.Data).To(Equal(expectedData)) + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(Succeed(), "compiled / flattened proxy secret should exist") + + Eventually(func(g Gomega) { + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + g.Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunners") + + for _, runner := range runnerList.Items { + g.Expect(runner.Spec.ProxySecretRef).To(Equal(proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet))) + } + }, ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestInterval).Should(Succeed(), "EphemeralRunners should have a reference to the proxy secret") + + // patch ephemeral runner set to have 0 replicas + patch := client.MergeFrom(ephemeralRunnerSet.DeepCopy()) + ephemeralRunnerSet.Spec.Replicas = 0 + err = k8sClient.Patch(ctx, ephemeralRunnerSet, patch) + Expect(err).NotTo(HaveOccurred(), "failed to patch EphemeralRunnerSet") + + // Set pods to PodSucceeded to simulate an actual EphemeralRunner stopping + Eventually( + func(g Gomega) (int, error) { + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err 
!= nil { + return -1, err + } + + // Set status to simulate a configured EphemeralRunner + refetch := false + for i, runner := range runnerList.Items { + if runner.Status.RunnerId == 0 { + updatedRunner := runner.DeepCopy() + updatedRunner.Status.Phase = corev1.PodSucceeded + updatedRunner.Status.RunnerId = i + 100 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + refetch = true + } + } + + if refetch { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(1), "1 EphemeralRunner should exist") + + // Delete the EphemeralRunnerSet + err = k8sClient.Delete(ctx, ephemeralRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to delete EphemeralRunnerSet") + + // Assert that the proxy secret is deleted + Eventually(func(g Gomega) { + proxySecret := &corev1.Secret{} + err = k8sClient.Get(ctx, client.ObjectKey{ + Namespace: autoscalingNS.Name, + Name: proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet), + }, proxySecret) + g.Expect(err).To(HaveOccurred(), "proxy secret should be deleted") + g.Expect(kerrors.IsNotFound(err)).To(BeTrue(), "proxy secret should be deleted") + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(Succeed(), "proxy secret should be deleted") + }) + + It("should configure the actions client to use proxy details", func() { + secretCredentials := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "proxy-credentials", + Namespace: autoscalingNS.Name, + }, + Data: map[string][]byte{ + "username": []byte("test"), + "password": []byte("password"), + }, + } + + err := k8sClient.Create(ctx, secretCredentials) + Expect(err).NotTo(HaveOccurred(), "failed to create secret credentials") + + 
proxySuccessfulllyCalled := false + proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + header := r.Header.Get("Proxy-Authorization") + Expect(header).NotTo(BeEmpty()) + + header = strings.TrimPrefix(header, "Basic ") + decoded, err := base64.StdEncoding.DecodeString(header) + Expect(err).NotTo(HaveOccurred()) + Expect(string(decoded)).To(Equal("test:password")) + + proxySuccessfulllyCalled = true + w.WriteHeader(http.StatusOK) + })) + GinkgoT().Cleanup(func() { + proxy.Close() + }) + + ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ + Replicas: 1, + EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ + GitHubConfigUrl: "http://example.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + RunnerScaleSetId: 100, + Proxy: &v1alpha1.ProxyConfig{ + HTTP: &v1alpha1.ProxyServerConfig{ + Url: proxy.URL, + CredentialSecretRef: "proxy-credentials", + }, + }, + PodTemplateSpec: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + }, + } + + err = k8sClient.Create(ctx, ephemeralRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet") + + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + Eventually(func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(1), "failed to create ephemeral runner") + + runner := runnerList.Items[0].DeepCopy() + runner.Status.Phase = corev1.PodRunning + runner.Status.RunnerId = 100 + err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0])) + 
Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status") + + updatedRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, updatedRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updatedRunnerSet.Spec.Replicas = 0 + err = k8sClient.Update(ctx, updatedRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + Eventually( + func() bool { + return proxySuccessfulllyCalled + }, + 2*time.Second, + interval, + ).Should(BeEquivalentTo(true)) + }) +}) diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index f6aa1a4719..a7c58caea5 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -18,10 +18,9 @@ const ( jitTokenKey = "jitToken" ) -type resourceBuilder struct { -} +type resourceBuilder struct{} -func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret) *corev1.Pod { +func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod { newLabels := map[string]string{} newLabels[scaleSetListenerLabel] = fmt.Sprintf("%v-%v", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, autoscalingListener.Spec.AutoscalingRunnerSetName) @@ -51,6 +50,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A Value: strconv.Itoa(autoscalingListener.Spec.RunnerScaleSetId), }, } + listenerEnv = append(listenerEnv, envs...) 
if _, ok := secret.Data["github_token"]; ok { listenerEnv = append(listenerEnv, corev1.EnvVar{ @@ -112,7 +112,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A ServiceAccountName: serviceAccount.Name, Containers: []corev1.Container{ { - Name: name, + Name: autoscalingListenerContainerName, Image: autoscalingListener.Spec.Image, Env: listenerEnv, ImagePullPolicy: corev1.PullIfNotPresent, @@ -299,6 +299,7 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1. MaxRunners: effectiveMaxRunners, Image: image, ImagePullSecrets: imagePullSecrets, + Proxy: autoscalingRunnerSet.Spec.Proxy, }, } @@ -316,7 +317,7 @@ func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme } } -func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret) *corev1.Pod { +func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod { var newPod corev1.Pod labels := map[string]string{} @@ -374,7 +375,9 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a corev1.EnvVar{ Name: EnvVarRunnerExtraUserAgent, Value: fmt.Sprintf("actions-runner-controller/%s", build.Version), - }) + }, + ) + c.Env = append(c.Env, envs...) 
} newPod.Spec.Containers = append(newPod.Spec.Containers, c) @@ -427,6 +430,22 @@ func scaleSetListenerSecretMirrorName(autoscalingListener *v1alpha1.AutoscalingL return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash) } +func proxyListenerSecretName(autoscalingListener *v1alpha1.AutoscalingListener) string { + namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace) + if len(namespaceHash) > 8 { + namespaceHash = namespaceHash[:8] + } + return fmt.Sprintf("%v-%v-listener-proxy", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash) +} + +func proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) string { + namespaceHash := hash.FNVHashString(ephemeralRunnerSet.Namespace) + if len(namespaceHash) > 8 { + namespaceHash = namespaceHash[:8] + } + return fmt.Sprintf("%v-%v-runner-proxy", ephemeralRunnerSet.Name, namespaceHash) +} + func rulesForListenerRole(resourceNames []string) []rbacv1.PolicyRule { return []rbacv1.PolicyRule{ { diff --git a/github/actions/client.go b/github/actions/client.go index 2d3c790682..4574b3546d 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -76,8 +76,12 @@ type Client struct { rootCAs *x509.CertPool tlsInsecureSkipVerify bool + + proxyFunc ProxyFunc } +type ProxyFunc func(req *http.Request) (*url.URL, error) + type ClientOption func(*Client) func WithUserAgent(userAgent string) ClientOption { @@ -116,6 +120,12 @@ func WithoutTLSVerify() ClientOption { } } +func WithProxy(proxyFunc ProxyFunc) ClientOption { + return func(c *Client) { + c.proxyFunc = proxyFunc + } +} + func NewClient(githubConfigURL string, creds *ActionsAuth, options ...ClientOption) (*Client, error) { config, err := ParseGitHubConfigFromURL(githubConfigURL) if err != nil { @@ -160,6 +170,8 @@ func NewClient(githubConfigURL string, creds *ActionsAuth, options ...ClientOpti transport.TLSClientConfig.InsecureSkipVerify = true } + 
transport.Proxy = ac.proxyFunc + retryClient.HTTPClient.Transport = transport ac.Client = retryClient.StandardClient() diff --git a/github/actions/client_proxy_test.go b/github/actions/client_proxy_test.go new file mode 100644 index 0000000000..c63d41a2b6 --- /dev/null +++ b/github/actions/client_proxy_test.go @@ -0,0 +1,39 @@ +package actions_test + +import ( + "net/http" + "net/url" + "testing" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/actions/actions-runner-controller/github/actions/testserver" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/http/httpproxy" +) + +func TestClientProxy(t *testing.T) { + serverCalled := false + + proxy := testserver.New(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverCalled = true + })) + + proxyConfig := &httpproxy.Config{ + HTTPProxy: proxy.URL, + } + proxyFunc := func(req *http.Request) (*url.URL, error) { + return proxyConfig.ProxyFunc()(req.URL) + } + + c, err := actions.NewClient("http://github.com/org/repo", nil, actions.WithProxy(proxyFunc)) + require.NoError(t, err) + + req, err := http.NewRequest(http.MethodGet, "http://example.com", nil) + require.NoError(t, err) + + _, err = c.Do(req) + require.NoError(t, err) + + assert.True(t, serverCalled) +} diff --git a/github/actions/multi_client.go b/github/actions/multi_client.go index bfef889371..3731687036 100644 --- a/github/actions/multi_client.go +++ b/github/actions/multi_client.go @@ -125,7 +125,7 @@ func (m *multiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, if hasToken { auth.Token = token - return m.GetClientFor(ctx, githubConfigURL, auth, namespace) + return m.GetClientFor(ctx, githubConfigURL, auth, namespace, options...) 
} parsedAppID, err := strconv.ParseInt(appID, 10, 64) @@ -139,7 +139,7 @@ func (m *multiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, } auth.AppCreds = &GitHubAppAuth{AppID: parsedAppID, AppInstallationID: parsedAppInstallationID, AppPrivateKey: appPrivateKey} - return m.GetClientFor(ctx, githubConfigURL, auth, namespace) + return m.GetClientFor(ctx, githubConfigURL, auth, namespace, options...) } func RootCAsFromConfigMap(configMapData map[string][]byte) (*x509.CertPool, error) { diff --git a/github/actions/multi_client_test.go b/github/actions/multi_client_test.go index 80d54a3fe4..8606353e56 100644 --- a/github/actions/multi_client_test.go +++ b/github/actions/multi_client_test.go @@ -55,24 +55,48 @@ func TestMultiClientOptions(t *testing.T) { defaultNamespace := "default" defaultConfigURL := "https://github.com/org/repo" - defaultCreds := &ActionsAuth{ - Token: "token", - } - multiClient := NewMultiClient("test-user-agent", logger) - service, err := multiClient.GetClientFor( - ctx, - defaultConfigURL, - *defaultCreds, - defaultNamespace, - WithUserAgent("test-option"), - ) - require.NoError(t, err) + t.Run("GetClientFor", func(t *testing.T) { + defaultCreds := &ActionsAuth{ + Token: "token", + } - client := service.(*Client) - req, err := client.NewGitHubAPIRequest(ctx, "GET", "/test", nil) - require.NoError(t, err) - assert.Equal(t, "test-option", req.Header.Get("User-Agent")) + multiClient := NewMultiClient("test-user-agent", logger) + service, err := multiClient.GetClientFor( + ctx, + defaultConfigURL, + *defaultCreds, + defaultNamespace, + WithUserAgent("test-option"), + ) + require.NoError(t, err) + + client := service.(*Client) + req, err := client.NewGitHubAPIRequest(ctx, "GET", "/test", nil) + require.NoError(t, err) + assert.Equal(t, "test-option", req.Header.Get("User-Agent")) + }) + + t.Run("GetClientFromSecret", func(t *testing.T) { + secret := map[string][]byte{ + "github_token": []byte("token"), + } + + multiClient := 
NewMultiClient("test-user-agent", logger) + service, err := multiClient.GetClientFromSecret( + ctx, + defaultConfigURL, + defaultNamespace, + secret, + WithUserAgent("test-option"), + ) + require.NoError(t, err) + + client := service.(*Client) + req, err := client.NewGitHubAPIRequest(ctx, "GET", "/test", nil) + require.NoError(t, err) + assert.Equal(t, "test-option", req.Header.Get("User-Agent")) + }) } func TestCreateJWT(t *testing.T) { diff --git a/go.mod b/go.mod index 237baa21f4..179672d350 100644 --- a/go.mod +++ b/go.mod @@ -84,10 +84,10 @@ require ( github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/term v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/net v0.6.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/term v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/go.sum b/go.sum index 64d3957d7b..7542f60efc 100644 --- a/go.sum +++ b/go.sum @@ -450,6 +450,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -518,10 +520,14 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -531,6 +537,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From aee1c31a14961a7645867c96ca56b60fb6dd3025 Mon Sep 17 00:00:00 2001 From: ggreenwood Date: Tue, 21 Feb 2023 13:40:23 -0500 Subject: [PATCH 078/561] Documentation corrections (#2116) Co-authored-by: Yusuke Kuoka --- TROUBLESHOOTING.md | 2 +- docs/quickstart.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md index 3dde50db11..2e5c92c245 100644 --- a/TROUBLESHOOTING.md +++ b/TROUBLESHOOTING.md @@ -303,4 +303,4 @@ If you noticed that it takes several minutes for sidecar dind container to be cr **Solution** -The solution is to switch to using faster storage, if you are experiencing this issue you are probably using hdd, switch to ssh fixed the problem in my case. Most cloud providers have a list of storage options to use just pick something faster that your current disk, for on prem clusters you will need to invest in some ssds. +The solution is to switch to using faster storage, if you are experiencing this issue you are probably using HDD storage. Switching to SSD storage fixed the problem in my case. Most cloud providers have a list of storage options to use just pick something faster that your current disk, for on prem clusters you will need to invest in some SSDs. diff --git a/docs/quickstart.md b/docs/quickstart.md index 4061cf5026..a278512c32 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -107,7 +107,7 @@ spec: spec: repository: mumoshu/actions-runner-controller-ci ```` - *note:- Replace "mumoshu/actions-runner-controller-ci" with your repository name. + *note:- Replace "mumoshu/actions-runner-controller-ci" with the name of the GitHub repository the runner will be associated with. Apply this file to your K8s cluster. 
```shell From 745242740ff26a1a5d1654f4ca4b909295d56f35 Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Wed, 22 Feb 2023 16:57:59 +0000 Subject: [PATCH 079/561] Disable metrics serving in proxy tests (#2307) --- .../autoscalingrunnerset_controller_test.go | 7 +++++-- .../ephemeralrunnerset_controller_test.go | 2 +- controllers/actions.summerwind.net/integration_test.go | 5 ++--- .../runnerdeployment_controller_test.go | 5 ++--- .../runnerreplicaset_controller_test.go | 3 ++- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index 4b1ac8b916..26918c9ee6 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -464,7 +464,9 @@ var _ = Describe("Test AutoscalingController creation failures", func() { err = k8sClient.Create(ctx, configSecret) Expect(err).NotTo(HaveOccurred(), "failed to create config secret") - mgr, err := ctrl.NewManager(cfg, ctrl.Options{}) + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + MetricsBindAddress: "0", + }) Expect(err).NotTo(HaveOccurred(), "failed to create manager") controller := &AutoscalingRunnerSetReconciler{ @@ -610,7 +612,8 @@ var _ = Describe("Test Client optional configuration", func() { Expect(err).NotTo(HaveOccurred(), "failed to create config secret") mgr, err = ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoscalingNS.Name, + Namespace: autoscalingNS.Name, + MetricsBindAddress: "0", }) Expect(err).NotTo(HaveOccurred(), "failed to create manager") diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index cba4eb2d5a..571492b907 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ 
b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -458,7 +458,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { runningRunner = runnerList.Items[1].DeepCopy() runningRunner.Status.JobRequestId = 1001 - err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[0])) + err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[1])) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") // Scale down to 1 diff --git a/controllers/actions.summerwind.net/integration_test.go b/controllers/actions.summerwind.net/integration_test.go index d5d967d141..d10184eb84 100644 --- a/controllers/actions.summerwind.net/integration_test.go +++ b/controllers/actions.summerwind.net/integration_test.go @@ -69,7 +69,8 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment { Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: ns.Name, + Namespace: ns.Name, + MetricsBindAddress: "0", }) Expect(err).NotTo(HaveOccurred(), "failed to create manager") @@ -188,7 +189,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() { ns := env.Namespace Describe("when no existing resources exist", func() { - It("should create and scale organization's repository runners on workflow_job event", func() { name := "example-runnerdeploy" @@ -455,7 +455,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() { env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake list runners") } }) - }) }) diff --git a/controllers/actions.summerwind.net/runnerdeployment_controller_test.go b/controllers/actions.summerwind.net/runnerdeployment_controller_test.go index 5fe065665b..220ac626c8 100644 --- a/controllers/actions.summerwind.net/runnerdeployment_controller_test.go +++ b/controllers/actions.summerwind.net/runnerdeployment_controller_test.go @@ -143,7 +143,8 @@ func 
SetupDeploymentTest(ctx2 context.Context) *corev1.Namespace { Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: ns.Name, + Namespace: ns.Name, + MetricsBindAddress: "0", }) Expect(err).NotTo(HaveOccurred(), "failed to create manager") @@ -180,7 +181,6 @@ var _ = Context("Inside of a new namespace", func() { ns := SetupDeploymentTest(ctx) Describe("when no existing resources exist", func() { - It("should create a new RunnerReplicaSet resource from the specified template, add a another RunnerReplicaSet on template modification, and eventually removes old runnerreplicasets", func() { name := "example-runnerdeploy-1" @@ -491,6 +491,5 @@ var _ = Context("Inside of a new namespace", func() { time.Second*1, time.Millisecond*500).Should(Not(BeNil())) } }) - }) }) diff --git a/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go b/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go index f018a45694..cba3c9d54a 100644 --- a/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go +++ b/controllers/actions.summerwind.net/runnerreplicaset_controller_test.go @@ -46,7 +46,8 @@ func SetupTest(ctx2 context.Context) *corev1.Namespace { Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: ns.Name, + Namespace: ns.Name, + MetricsBindAddress: "0", }) Expect(err).NotTo(HaveOccurred(), "failed to create manager") From f30a4dff16ffbedc3d1a412b99bc7ecdc9e493d8 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Thu, 23 Feb 2023 03:40:21 -0500 Subject: [PATCH 080/561] Bump arc-2 chart version and prepare 0.2.0 release (#2313) --- charts/actions-runner-controller-2/Chart.yaml | 4 +-- .../tests/template_test.go | 29 ++++++++++++++++--- charts/auto-scaling-runner-set/Chart.yaml | 4 +-- go.mod | 4 +-- 4 files changed, 31 insertions(+), 10 deletions(-) diff --git 
a/charts/actions-runner-controller-2/Chart.yaml b/charts/actions-runner-controller-2/Chart.yaml index e592f5a12d..5e7984a0bd 100644 --- a/charts/actions-runner-controller-2/Chart.yaml +++ b/charts/actions-runner-controller-2/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "preview" +appVersion: "0.2.0" home: https://github.com/actions/actions-runner-controller diff --git a/charts/actions-runner-controller-2/tests/template_test.go b/charts/actions-runner-controller-2/tests/template_test.go index 309304a61f..6c77fa3650 100644 --- a/charts/actions-runner-controller-2/tests/template_test.go +++ b/charts/actions-runner-controller-2/tests/template_test.go @@ -1,6 +1,7 @@ package tests import ( + "os" "path/filepath" "strings" "testing" @@ -10,11 +11,17 @@ import ( "github.com/gruntwork-io/terratest/modules/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" ) +type Chart struct { + Version string `yaml:"version"` + AppVersion string `yaml:"appVersion"` +} + func TestTemplate_CreateServiceAccount(t *testing.T) { t.Parallel() @@ -201,6 +208,13 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") require.NoError(t, err) + chartContent, err := 
os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml")) + require.NoError(t, err) + + chart := new(Chart) + err = yaml.Unmarshal(chartContent, chart) + require.NoError(t, err) + releaseName := "test-arc" namespaceName := "test-" + strings.ToLower(random.UniqueId()) @@ -218,10 +232,10 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Equal(t, namespaceName, deployment.Namespace) assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Name) - assert.Equal(t, "actions-runner-controller-2-0.1.0", deployment.Labels["helm.sh/chart"]) + assert.Equal(t, "actions-runner-controller-2-"+chart.Version, deployment.Labels["helm.sh/chart"]) assert.Equal(t, "actions-runner-controller-2", deployment.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) - assert.Equal(t, "preview", deployment.Labels["app.kubernetes.io/version"]) + assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) assert.Equal(t, int32(1), *deployment.Spec.Replicas) @@ -280,6 +294,13 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") require.NoError(t, err) + chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml")) + require.NoError(t, err) + + chart := new(Chart) + err = yaml.Unmarshal(chartContent, chart) + require.NoError(t, err) + releaseName := "test-arc" namespaceName := "test-" + strings.ToLower(random.UniqueId()) @@ -315,10 +336,10 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Equal(t, namespaceName, deployment.Namespace) assert.Equal(t, "actions-runner-controller-2-fullname-override", deployment.Name) - assert.Equal(t, "actions-runner-controller-2-0.1.0", deployment.Labels["helm.sh/chart"]) + assert.Equal(t, "actions-runner-controller-2-"+chart.Version, 
deployment.Labels["helm.sh/chart"]) assert.Equal(t, "actions-runner-controller-2-override", deployment.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) - assert.Equal(t, "preview", deployment.Labels["app.kubernetes.io/version"]) + assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) assert.Equal(t, "bar", deployment.Labels["foo"]) assert.Equal(t, "actions", deployment.Labels["github"]) diff --git a/charts/auto-scaling-runner-set/Chart.yaml b/charts/auto-scaling-runner-set/Chart.yaml index 6a41e511ad..0de198886e 100644 --- a/charts/auto-scaling-runner-set/Chart.yaml +++ b/charts/auto-scaling-runner-set/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "0.1.0" +appVersion: "0.2.0" home: https://github.com/actions/dev-arc diff --git a/go.mod b/go.mod index 179672d350..81d11f4666 100644 --- a/go.mod +++ b/go.mod @@ -25,8 +25,10 @@ require ( github.com/teambition/rrule-go v1.8.0 go.uber.org/multierr v1.7.0 go.uber.org/zap v1.24.0 + golang.org/x/net v0.6.0 golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 gomodules.xyz/jsonpatch/v2 v2.2.0 + gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.26.0 k8s.io/apimachinery v0.26.0 k8s.io/client-go v0.26.0 @@ -84,7 +86,6 @@ require ( github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/net v0.6.0 // indirect golang.org/x/sys v0.5.0 // indirect golang.org/x/term v0.5.0 // indirect golang.org/x/text v0.7.0 // indirect @@ -92,7 +93,6 @@ require ( google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.26.0 // indirect k8s.io/component-base v0.26.0 // indirect From 87c10ae352974f9a14aad479e3ca21b3235f5e85 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Thu, 23 Feb 2023 12:20:39 +0100 Subject: [PATCH 081/561] Add release change log to quickstart guide (#2315) --- .../actions-runner-controller-2/README.md | 78 +++++++++++++++++-- 1 file changed, 73 insertions(+), 5 deletions(-) diff --git a/docs/preview/actions-runner-controller-2/README.md b/docs/preview/actions-runner-controller-2/README.md index 792d4c0a5f..ec74acbdd9 100644 --- a/docs/preview/actions-runner-controller-2/README.md +++ b/docs/preview/actions-runner-controller-2/README.md @@ -38,7 +38,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 --namespace "${NAMESPACE}" \ --create-namespace \ oci://ghcr.io/actions/actions-runner-controller-charts/actions-runner-controller-2 \ - 
--version 0.1.0 + --version 0.2.0 ``` 1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app). @@ -59,7 +59,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 --create-namespace \ --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ --set githubConfigSecret.github_token="${GITHUB_PAT}" \ - oci://ghcr.io/actions/actions-runner-controller-charts/auto-scaling-runner-set --version 0.1.0 + oci://ghcr.io/actions/actions-runner-controller-charts/auto-scaling-runner-set --version 0.2.0 ``` ```bash @@ -77,7 +77,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 --set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \ --set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \ --set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \ - oci://ghcr.io/actions/actions-runner-controller-charts/auto-scaling-runner-set --version 0.1.0 + oci://ghcr.io/actions/actions-runner-controller-charts/auto-scaling-runner-set --version 0.2.0 ``` 1. Check your installation. 
If everything went well, you should see the following: @@ -86,8 +86,8 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 $ helm list -n "${NAMESPACE}" NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION - arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed actions-runner-controller-2-0.1.0 preview - arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed auto-scaling-runner-set-0.1.0 0.1.0 + arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed actions-runner-controller-2-0.2.0 preview + arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed auto-scaling-runner-set-0.2.0 0.2.0 ``` ```bash @@ -141,3 +141,71 @@ kubectl logs -n "${NAMESPACE}" -l runner-scale-set-listener=arc-systems-arc-runn ### If you installed the autoscaling runner set, but the listener pod is not created Verify that the secret you provided is correct and that the `githubConfigUrl` you provided is accurate. + +## Changelog + +### v0.2.0 + +#### Major changes + +1. Added proxy support for the controller and the runner pods, see the new helm chart fields [#2286](https://github.com/actions/actions-runner-controller/pull/2286) +1. Added the abiilty to provide a pre-defined kubernetes secret for the auto scaling runner set helm chart [#2234](https://github.com/actions/actions-runner-controller/pull/2234) +1. Enhanced security posture by removing un-required permissions for the manager-role [#2260](https://github.com/actions/actions-runner-controller/pull/2260) +1. Enhanced our logging by returning an error when a runner group is defined in the values file but it's not created in GitHub [#2215](https://github.com/actions/actions-runner-controller/pull/2215) +1. Fixed helm charts issues that were preventing the use of DinD [#2291](https://github.com/actions/actions-runner-controller/pull/2291) +1. 
Fixed a bug that was preventing runner scale from being removed from the backend when they were deleted from the cluster [#2255](https://github.com/actions/actions-runner-controller/pull/2255) [#2223](https://github.com/actions/actions-runner-controller/pull/2223) +1. Fixed bugs with the helm chart definitions preventing certain values from being set [#2222](https://github.com/actions/actions-runner-controller/pull/2222) +1. Fixed a bug that prevented the configuration of a runner group for a runner scale set [#2216](https://github.com/actions/actions-runner-controller/pull/2216) + +#### Log + +- [1c7b7f4](https://github.com/actions/actions-runner-controller/commit/1c7b7f4) Bump arc-2 chart version and prepare 0.2.0 release [#2313](https://github.com/actions/actions-runner-controller/pull/2313) +- [73e22a1](https://github.com/actions/actions-runner-controller/commit/73e22a1) Disable metrics serving in proxy tests [#2307](https://github.com/actions/actions-runner-controller/pull/2307) +- [9b44f00](https://github.com/actions/actions-runner-controller/commit/9b44f00) Documentation corrections [#2116](https://github.com/actions/actions-runner-controller/pull/2116) +- [6b4250c](https://github.com/actions/actions-runner-controller/commit/6b4250c) Add support for proxy [#2286](https://github.com/actions/actions-runner-controller/pull/2286) +- [ced8822](https://github.com/actions/actions-runner-controller/commit/ced8822) Resolves the erroneous webhook scale down due to check runs [#2119](https://github.com/actions/actions-runner-controller/pull/2119) +- [44c06c2](https://github.com/actions/actions-runner-controller/commit/44c06c2) fix: case-insensitive webhook label matching [#2302](https://github.com/actions/actions-runner-controller/pull/2302) +- [4103fe3](https://github.com/actions/actions-runner-controller/commit/4103fe3) Use DOCKER_IMAGE_NAME instead of NAME to avoid conflict. 
[#2303](https://github.com/actions/actions-runner-controller/pull/2303) +- [a44fe04](https://github.com/actions/actions-runner-controller/commit/a44fe04) Fix manager crashloopback for ARC deployments without scaleset-related controllers [#2293](https://github.com/actions/actions-runner-controller/pull/2293) +- [274d0c8](https://github.com/actions/actions-runner-controller/commit/274d0c8) Added ability to configure log level from chart values [#2252](https://github.com/actions/actions-runner-controller/pull/2252) +- [256e08e](https://github.com/actions/actions-runner-controller/commit/256e08e) Ask runner to wait for docker daemon from DinD. [#2292](https://github.com/actions/actions-runner-controller/pull/2292) +- [f677fd5](https://github.com/actions/actions-runner-controller/commit/f677fd5) doc: Fix chart name for helm commands in docs [#2287](https://github.com/actions/actions-runner-controller/pull/2287) +- [d962714](https://github.com/actions/actions-runner-controller/commit/d962714) Fix helm chart when containerMode.type=dind. 
[#2291](https://github.com/actions/actions-runner-controller/pull/2291) +- [3886f28](https://github.com/actions/actions-runner-controller/commit/3886f28) Add EKS test environment Terraform templates [#2290](https://github.com/actions/actions-runner-controller/pull/2290) +- [dab9004](https://github.com/actions/actions-runner-controller/commit/dab9004) Added workflow to be triggered via rest api dispatch in e2e test [#2283](https://github.com/actions/actions-runner-controller/pull/2283) +- [dd8ec1a](https://github.com/actions/actions-runner-controller/commit/dd8ec1a) Add testserver package [#2281](https://github.com/actions/actions-runner-controller/pull/2281) +- [8e52a6d](https://github.com/actions/actions-runner-controller/commit/8e52a6d) EphemeralRunner: On cleanup, if pod is pending, delete from service [#2255](https://github.com/actions/actions-runner-controller/pull/2255) +- [9990243](https://github.com/actions/actions-runner-controller/commit/9990243) Early return if finalizer does not exist to make it more readable [#2262](https://github.com/actions/actions-runner-controller/pull/2262) +- [0891981](https://github.com/actions/actions-runner-controller/commit/0891981) Port ADRs from internal repo [#2267](https://github.com/actions/actions-runner-controller/pull/2267) +- [facae69](https://github.com/actions/actions-runner-controller/commit/facae69) Remove un-required permissions for the manager-role of the new `AutoScalingRunnerSet` [#2260](https://github.com/actions/actions-runner-controller/pull/2260) +- [8f62e35](https://github.com/actions/actions-runner-controller/commit/8f62e35) Add options to multi client [#2257](https://github.com/actions/actions-runner-controller/pull/2257) +- [55951c2](https://github.com/actions/actions-runner-controller/commit/55951c2) Add new workflow to automate runner updates [#2247](https://github.com/actions/actions-runner-controller/pull/2247) +- [c4297d2](https://github.com/actions/actions-runner-controller/commit/c4297d2) Avoid 
deleting scale set if annotation is not parsable or if it does not exist [#2239](https://github.com/actions/actions-runner-controller/pull/2239) +- [0774f06](https://github.com/actions/actions-runner-controller/commit/0774f06) ADR: automate runner updates [#2244](https://github.com/actions/actions-runner-controller/pull/2244) +- [92ab11b](https://github.com/actions/actions-runner-controller/commit/92ab11b) Use UUID v5 for client identifiers [#2241](https://github.com/actions/actions-runner-controller/pull/2241) +- [7414dc6](https://github.com/actions/actions-runner-controller/commit/7414dc6) Add Identifier to actions.Client [#2237](https://github.com/actions/actions-runner-controller/pull/2237) +- [34efb9d](https://github.com/actions/actions-runner-controller/commit/34efb9d) Add documentation to update ARC with prometheus CRDs needed by actions metrics server [#2209](https://github.com/actions/actions-runner-controller/pull/2209) +- [fbad561](https://github.com/actions/actions-runner-controller/commit/fbad561) Allow provide pre-defined kubernetes secret when helm-install AutoScalingRunnerSet [#2234](https://github.com/actions/actions-runner-controller/pull/2234) +- [a5cef7e](https://github.com/actions/actions-runner-controller/commit/a5cef7e) Resolve CI break due to bad merge. [#2236](https://github.com/actions/actions-runner-controller/pull/2236) +- [1f4fe46](https://github.com/actions/actions-runner-controller/commit/1f4fe46) Delete RunnerScaleSet on service when AutoScalingRunnerSet is deleted. 
[#2223](https://github.com/actions/actions-runner-controller/pull/2223) +- [067686c](https://github.com/actions/actions-runner-controller/commit/067686c) Fix typos and markdown structure in troubleshooting guide [#2148](https://github.com/actions/actions-runner-controller/pull/2148) +- [df12e00](https://github.com/actions/actions-runner-controller/commit/df12e00) Remove network requests from actions.NewClient [#2219](https://github.com/actions/actions-runner-controller/pull/2219) +- [cc26593](https://github.com/actions/actions-runner-controller/commit/cc26593) Skip CT when list-changed=false. [#2228](https://github.com/actions/actions-runner-controller/pull/2228) +- [835eac7](https://github.com/actions/actions-runner-controller/commit/835eac7) Fix helm charts when pass values file. [#2222](https://github.com/actions/actions-runner-controller/pull/2222) +- [01e9dd3](https://github.com/actions/actions-runner-controller/commit/01e9dd3) Update Validate ARC workflow to go 1.19 [#2220](https://github.com/actions/actions-runner-controller/pull/2220) +- [8038181](https://github.com/actions/actions-runner-controller/commit/8038181) Allow update runner group for AutoScalingRunnerSet [#2216](https://github.com/actions/actions-runner-controller/pull/2216) +- [219ba5b](https://github.com/actions/actions-runner-controller/commit/219ba5b) chore(deps): bump sigs.k8s.io/controller-runtime from 0.13.1 to 0.14.1 [#2132](https://github.com/actions/actions-runner-controller/pull/2132) +- [b09e3a2](https://github.com/actions/actions-runner-controller/commit/b09e3a2) Return error for non-existing runner group. 
[#2215](https://github.com/actions/actions-runner-controller/pull/2215) +- [7ea60e4](https://github.com/actions/actions-runner-controller/commit/7ea60e4) Fix intermittent image push failures to GHCR [#2214](https://github.com/actions/actions-runner-controller/pull/2214) +- [c8918f5](https://github.com/actions/actions-runner-controller/commit/c8918f5) Fix URL for authenticating using a GitHub app [#2206](https://github.com/actions/actions-runner-controller/pull/2206) +- [d57d17f](https://github.com/actions/actions-runner-controller/commit/d57d17f) Add support for custom CA in actions.Client [#2199](https://github.com/actions/actions-runner-controller/pull/2199) +- [6e69c75](https://github.com/actions/actions-runner-controller/commit/6e69c75) chore(deps): bump github.com/hashicorp/go-retryablehttp from 0.7.1 to 0.7.2 [#2203](https://github.com/actions/actions-runner-controller/pull/2203) +- [882bfab](https://github.com/actions/actions-runner-controller/commit/882bfab) Renaming autoScaling to autoscaling in tests matching the convention [#2201](https://github.com/actions/actions-runner-controller/pull/2201) +- [3327f62](https://github.com/actions/actions-runner-controller/commit/3327f62) Refactor actions.Client with options to help extensibility [#2193](https://github.com/actions/actions-runner-controller/pull/2193) +- [282f2dd](https://github.com/actions/actions-runner-controller/commit/282f2dd) chore(deps): bump github.com/onsi/gomega from 1.20.2 to 1.25.0 [#2169](https://github.com/actions/actions-runner-controller/pull/2169) +- [d67f808](https://github.com/actions/actions-runner-controller/commit/d67f808) Include nikola-jokic in CODEOWNERS file [#2184](https://github.com/actions/actions-runner-controller/pull/2184) +- [4932412](https://github.com/actions/actions-runner-controller/commit/4932412) Fix L0 test to make it more reliable. 
[#2178](https://github.com/actions/actions-runner-controller/pull/2178) +- [6da1cde](https://github.com/actions/actions-runner-controller/commit/6da1cde) Update runner version to 2.301.1 [#2182](https://github.com/actions/actions-runner-controller/pull/2182) +- [f9bae70](https://github.com/actions/actions-runner-controller/commit/f9bae70) Add distinct namespace best practice note [#2181](https://github.com/actions/actions-runner-controller/pull/2181) +- [05a3908](https://github.com/actions/actions-runner-controller/commit/05a3908) Add arc-2 quickstart guide [#2180](https://github.com/actions/actions-runner-controller/pull/2180) +- [606ed1b](https://github.com/actions/actions-runner-controller/commit/606ed1b) Add Repository information to Runner Status [#2093](https://github.com/actions/actions-runner-controller/pull/2093) From cf481ea47d0fd8e46009b4632274139120704959 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Carlos=20Ferra=20de=20Almeida?= Date: Fri, 24 Feb 2023 12:19:51 +0000 Subject: [PATCH 082/561] [Docs] Fix typo (#2314) --- docs/automatically-scaling-runners.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/automatically-scaling-runners.md b/docs/automatically-scaling-runners.md index 747800da3f..fc860063ef 100644 --- a/docs/automatically-scaling-runners.md +++ b/docs/automatically-scaling-runners.md @@ -693,7 +693,7 @@ Note that this feature is currently intended for use with runner pods being term For example, a runner pod can be terminated prematurely by cluster-autoscaler when it's about to terminate the node on cluster scale down. All the variants of RunnerDeployment and RunnerSet managed runner pods, including runners with dockerd sidecars, rootless and rootful dind runners are affected by it. For dind runner pods only, you can use this feature to fix or alleviate the issue. 
-To be clear, an increase/decrease in the desired replicas of RunnerDeployment and RunnerSet will never result in worklfow jobs being termianted prematurely. +To be clear, an increase/decrease in the desired replicas of RunnerDeployment and RunnerSet will never result in worklfow jobs being terminated prematurely. That's because it's handled BEFORE the runner pod is terminated, by ARC respective controller. For anyone interested in improving it, adding a dedicated pod finalizer for this issue will never work. From 7d416c43488a840b9171e2787f40bd6fed3529cc Mon Sep 17 00:00:00 2001 From: Dimitar Date: Sat, 25 Feb 2023 05:18:29 +0000 Subject: [PATCH 083/561] Allow custom graceful termination and loadBalancerSourceRanges for the githubwebhook service (#2305) Co-authored-by: Dimitar Hristov --- Dockerfile | 4 ++- .../templates/githubwebhook.deployment.yaml | 8 ++++- .../templates/githubwebhook.service.yaml | 6 ++++ charts/actions-runner-controller/values.yaml | 3 ++ cmd/sleep/main.go | 33 +++++++++++++++++++ 5 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 cmd/sleep/main.go diff --git a/Dockerfile b/Dockerfile index 51a60255fd..4c5f8fbb54 100644 --- a/Dockerfile +++ b/Dockerfile @@ -39,7 +39,8 @@ RUN --mount=target=. 
\ go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}'" -o /out/manager main.go && \ go build -trimpath -ldflags="-s -w" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \ go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \ - go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver + go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver && \ + go build -trimpath -ldflags="-s -w" -o /out/sleep ./cmd/sleep # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details @@ -51,6 +52,7 @@ COPY --from=builder /out/manager . COPY --from=builder /out/github-webhook-server . COPY --from=builder /out/actions-metrics-server . COPY --from=builder /out/github-runnerscaleset-listener . +COPY --from=builder /out/sleep . USER 65532:65532 diff --git a/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml b/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml index 373ddb869f..86a4aacd8c 100644 --- a/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml +++ b/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml @@ -56,6 +56,12 @@ spec: {{- end }} command: - "/github-webhook-server" + {{- if .Values.githubWebhookServer.lifecycle }} + {{- with .Values.githubWebhookServer.lifecycle }} + lifecycle: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} env: - name: GITHUB_WEBHOOK_SECRET_TOKEN valueFrom: @@ -148,7 +154,7 @@ spec: securityContext: {{- toYaml .Values.securityContext | nindent 12 }} {{- end }} - terminationGracePeriodSeconds: 10 + terminationGracePeriodSeconds: {{ .Values.githubWebhookServer.terminationGracePeriodSeconds }} {{- with .Values.githubWebhookServer.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} diff --git a/charts/actions-runner-controller/templates/githubwebhook.service.yaml b/charts/actions-runner-controller/templates/githubwebhook.service.yaml index daeb790beb..99a7ea2c25 100644 --- a/charts/actions-runner-controller/templates/githubwebhook.service.yaml +++ b/charts/actions-runner-controller/templates/githubwebhook.service.yaml @@ -23,4 +23,10 @@ spec: {{- end }} selector: {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }} + {{- if .Values.githubWebhookServer.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $ip := .Values.githubWebhookServer.service.loadBalancerSourceRanges }} + - {{ $ip -}} + {{- end }} + {{- end }} {{- end }} diff --git a/charts/actions-runner-controller/values.yaml b/charts/actions-runner-controller/values.yaml index 4d724051ba..91dec2bee9 100644 --- a/charts/actions-runner-controller/values.yaml +++ b/charts/actions-runner-controller/values.yaml @@ -240,6 +240,7 @@ githubWebhookServer: protocol: TCP name: http #nodePort: someFixedPortForUseWithTerraformCdkCfnEtc + loadBalancerSourceRanges: [] ingress: enabled: false ingressClassName: "" @@ -276,6 +277,8 @@ githubWebhookServer: # minAvailable: 1 # maxUnavailable: 3 # queueLimit: 100 + terminationGracePeriodSeconds: 10 + lifecycle: {} actionsMetrics: serviceAnnotations: {} diff --git a/cmd/sleep/main.go b/cmd/sleep/main.go new file mode 100644 index 0000000000..b74f45033f --- /dev/null +++ b/cmd/sleep/main.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The actions-runner-controller authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "time" +) + +var Seconds int + +func main() { + fmt.Printf("sleeping for %d seconds\n", Seconds) + time.Sleep(time.Duration(Seconds) * time.Second) + fmt.Println("done sleeping") +} + +func init() { + flag.IntVar(&Seconds, "seconds", 60, "Number of seconds to sleep") + flag.Parse() +} From d537c80b3349387983de2c8a4265394ff054383b Mon Sep 17 00:00:00 2001 From: Kirill Bilchenko Date: Sat, 25 Feb 2023 08:02:22 +0100 Subject: [PATCH 084/561] Add reposity name and full name for prometheus labels in actions metrics (#2218) Co-authored-by: Yusuke Kuoka --- go.mod | 1 + go.sum | 2 ++ pkg/actionsmetrics/event_reader.go | 41 ++++++++++++++++++++++-------- 3 files changed, 34 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 81d11f4666..640c3216a6 100644 --- a/go.mod +++ b/go.mod @@ -57,6 +57,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-github/v45 v45.2.0 // indirect + github.com/google/go-github/v50 v50.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/gruntwork-io/go-commons v0.8.0 // indirect diff --git a/go.sum b/go.sum index 7542f60efc..5d3393cf1e 100644 --- a/go.sum +++ b/go.sum @@ -178,6 +178,8 @@ github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FC github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28= github.com/google/go-github/v47 v47.1.0 
h1:Cacm/WxQBOa9lF0FT0EMjZ2BWMetQ1TQfyurn4yF1z8= github.com/google/go-github/v47 v47.1.0/go.mod h1:VPZBXNbFSJGjyjFRUKo9vZGawTajnWzC/YjGw/oFKi0= +github.com/google/go-github/v50 v50.0.0 h1:gdO1AeuSZZK4iYWwVbjni7zg8PIQhp7QfmPunr016Jk= +github.com/google/go-github/v50 v50.0.0/go.mod h1:Ev4Tre8QoKiolvbpOSG3FIi4Mlon3S2Nt9W5JYqKiwA= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= diff --git a/pkg/actionsmetrics/event_reader.go b/pkg/actionsmetrics/event_reader.go index 5516e86b01..28b09e4093 100644 --- a/pkg/actionsmetrics/event_reader.go +++ b/pkg/actionsmetrics/event_reader.go @@ -10,7 +10,7 @@ import ( "time" "github.com/go-logr/logr" - gogithub "github.com/google/go-github/v47/github" + gogithub "github.com/google/go-github/v50/github" "github.com/prometheus/client_golang/prometheus" "github.com/actions/actions-runner-controller/github" @@ -59,11 +59,34 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in } // collect labels - labels := make(prometheus.Labels) + var ( + labels = make(prometheus.Labels) + keysAndValues = []interface{}{"job_id", fmt.Sprint(*e.WorkflowJob.ID)} + ) runsOn := strings.Join(e.WorkflowJob.Labels, `,`) labels["runs_on"] = runsOn + labels["job_name"] = *e.WorkflowJob.Name + keysAndValues = append(keysAndValues, "job_name", *e.WorkflowJob.Name) + + if e.Repo != nil { + if n := e.Repo.Name; n != nil { + labels["repository"] = *n + keysAndValues = append(keysAndValues, "repository", *n) + } + if n := e.Repo.FullName; n != nil { + labels["repository_full_name"] = *n + keysAndValues = append(keysAndValues, "repository_full_name", *n) + } + } + + if e.Org != nil { + if n := e.Org.Name; n != nil { + labels["organization"] = *e.Org.Name + keysAndValues = append(keysAndValues, "organization", *n) + } + } 
// switch on job status switch action := e.GetAction(); action { @@ -82,10 +105,11 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in reader.Log.Error(err, "reading workflow job log") return } else { - reader.Log.Info("reading workflow_job logs", - "job_name", *e.WorkflowJob.Name, - "job_id", fmt.Sprint(*e.WorkflowJob.ID), - ) + reader.Log.WithValues("job_name", *e.WorkflowJob.Name, "job_id", fmt.Sprint(*e.WorkflowJob.ID), "repository", *e.Repo.Name, "repository_full_name", *e.Repo.FullName) + if len(*e.Org.Name) > 0 { + reader.Log.WithValues("organization", *e.Org.Name) + } + reader.Log.Info("reading workflow_job logs") } githubWorkflowJobQueueDurationSeconds.With(labels).Observe(parseResult.QueueTime.Seconds()) @@ -101,10 +125,7 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in reader.Log.Error(err, "reading workflow job log") return } else { - reader.Log.Info("reading workflow_job logs", - "job_name", *e.WorkflowJob.Name, - "job_id", fmt.Sprint(*e.WorkflowJob.ID), - ) + reader.Log.Info("reading workflow_job logs", keysAndValues...) 
} if *e.WorkflowJob.Conclusion == "failure" { From 77ed874157cc09cf648a7a081a228106cbabdfc7 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Mon, 27 Feb 2023 07:34:29 +0900 Subject: [PATCH 085/561] Fix actions-metrics-server segfault issue (#2325) --- pkg/actionsmetrics/event_reader.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/actionsmetrics/event_reader.go b/pkg/actionsmetrics/event_reader.go index 28b09e4093..4beb33f5d4 100644 --- a/pkg/actionsmetrics/event_reader.go +++ b/pkg/actionsmetrics/event_reader.go @@ -131,6 +131,10 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in if *e.WorkflowJob.Conclusion == "failure" { failedStep := "null" for i, step := range e.WorkflowJob.Steps { + conclusion := step.Conclusion + if conclusion == nil { + continue + } // *step.Conclusion ~ // "success", @@ -141,11 +145,11 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in // "timed_out", // "action_required", // null - if *step.Conclusion == "failure" { + if *conclusion == "failure" { failedStep = fmt.Sprint(i) break } - if *step.Conclusion == "timed_out" { + if *conclusion == "timed_out" { failedStep = fmt.Sprint(i) parseResult.ExitCode = "timed_out" break From db37b594d45833882569286e14c7ea481f2d1a4f Mon Sep 17 00:00:00 2001 From: Ava Stancu Date: Mon, 27 Feb 2023 15:36:15 +0100 Subject: [PATCH 086/561] Avastancu/arc e2e test linux vm (#2285) --- .github/actions/e2e-arc-test/action.yaml | 36 +++++ .../workflows/e2e-test-dispatch-workflow.yaml | 2 +- .github/workflows/e2e-test-linux-vm.yaml | 23 +++ Makefile | 2 +- test_e2e_arc/arc_jobs_test.go | 132 ++++++++++++++++++ 5 files changed, 193 insertions(+), 2 deletions(-) create mode 100644 .github/actions/e2e-arc-test/action.yaml create mode 100644 .github/workflows/e2e-test-linux-vm.yaml create mode 100644 test_e2e_arc/arc_jobs_test.go diff --git a/.github/actions/e2e-arc-test/action.yaml 
b/.github/actions/e2e-arc-test/action.yaml new file mode 100644 index 0000000000..24c4032f38 --- /dev/null +++ b/.github/actions/e2e-arc-test/action.yaml @@ -0,0 +1,36 @@ +name: 'E2E ARC Test Action' +description: 'Includes common arc installation, setup and test file run' + +inputs: + github-token: + description: 'JWT generated with Github App inputs' + required: true + +runs: + using: "composite" + steps: + - name: Install ARC + run: helm install arc --namespace "arc-systems" --create-namespace ./charts/actions-runner-controller-2 + shell: bash + - name: Get datetime + # We are using this value further in the runner installation to avoid runner name collision that are a risk with hard coded values. + # A datetime including the 3 nanoseconds are a good option for this and also adds to readability and runner sorting if needed. + run: echo "DATE_TIME=$(date +'%Y-%m-%d-%H-%M-%S-%3N')" >> $GITHUB_ENV + shell: bash + - name: Install runners + run: | + helm install "arc-runner-${{ env.DATE_TIME }}" \ + --namespace "arc-runners" \ + --create-namespace \ + --set githubConfigUrl="https://github.com/actions/actions-runner-controller" \ + --set githubConfigSecret.github_token="${{ inputs.github-token }}" \ + ./charts/auto-scaling-runner-set \ + --debug + kubectl get pods -A + shell: bash + - name: Test ARC scales pods up and down + run: | + export GITHUB_TOKEN="${{ inputs.github-token }}" + export DATE_TIME="${{ env.DATE_TIME }}" + go test ./test_e2e_arc -v + shell: bash diff --git a/.github/workflows/e2e-test-dispatch-workflow.yaml b/.github/workflows/e2e-test-dispatch-workflow.yaml index 4e0e5d6ff1..baf34b503e 100644 --- a/.github/workflows/e2e-test-dispatch-workflow.yaml +++ b/.github/workflows/e2e-test-dispatch-workflow.yaml @@ -1,4 +1,4 @@ -name: ARC-REUSABLE-WORKFLOW +name: ARC Reusable Workflow on: workflow_dispatch: inputs: diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml new file mode 100644 index 
0000000000..4edee662a1 --- /dev/null +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -0,0 +1,23 @@ +name: CI ARC E2E Linux VM Test + +on: + workflow_dispatch: + +jobs: + setup-steps: + runs-on: [ubuntu-latest] + steps: + - uses: actions/checkout@v3 + - name: Create Kind cluster + run: | + PATH=$(go env GOPATH)/bin:$PATH + kind create cluster --name e2e-test + - name: Get Token + id: get_workflow_token + uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db + with: + application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} + application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} + - uses: ./.github/actions/e2e-arc-test + with: + github-token: ${{ steps.get_workflow_token.outputs.token }} diff --git a/Makefile b/Makefile index 46c3a5e3e8..595183384e 100644 --- a/Makefile +++ b/Makefile @@ -73,7 +73,7 @@ GO_TEST_ARGS ?= -short # Run tests test: generate fmt vet manifests shellcheck - go test $(GO_TEST_ARGS) ./... -coverprofile cover.out + go test $(GO_TEST_ARGS) `go list ./... 
| grep -v ./test_e2e_arc` -coverprofile cover.out go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers/actions.summerwind.net test-with-deps: kube-apiserver etcd kubectl diff --git a/test_e2e_arc/arc_jobs_test.go b/test_e2e_arc/arc_jobs_test.go new file mode 100644 index 0000000000..812859bc89 --- /dev/null +++ b/test_e2e_arc/arc_jobs_test.go @@ -0,0 +1,132 @@ +package e2e_arc + +import ( + "bytes" + "context" + "fmt" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +type podCountsByType struct { + controllers int + listeners int + runners int +} + +func getPodsByType(clientset *kubernetes.Clientset) podCountsByType { + arc_namespace := "arc-systems" + availableArcPods, err := clientset.CoreV1().Pods(arc_namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + panic(err.Error()) + } + runners_namespace := "arc-runners" + availableRunnerPods, err := clientset.CoreV1().Pods(runners_namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + panic(err.Error()) + } + podsByType := podCountsByType{} + for _, pod := range availableArcPods.Items { + if strings.Contains(pod.Name, "controller") { + podsByType.controllers += 1 + } + if strings.Contains(pod.Name, "listener") { + podsByType.listeners += 1 + } + } + for _, pod := range availableRunnerPods.Items { + if strings.Contains(pod.Name, "runner") { + podsByType.runners += 1 + } + } + return podsByType +} + +func pollForClusterState(clientset *kubernetes.Clientset, expectedPodsCount podCountsByType, maxTime int) bool { + sleepTime := 5 + maxRetries := maxTime / sleepTime + success := false + for i := 0; i <= maxRetries; i++ { + time.Sleep(time.Second * time.Duration(sleepTime)) + availablePodsCount := getPodsByType(clientset) + if availablePodsCount == expectedPodsCount { + success = true + break + } else { + fmt.Printf("%v", 
availablePodsCount) + } + } + return success +} + +func TestARCJobs(t *testing.T) { + configFile := filepath.Join( + os.Getenv("HOME"), ".kube", "config", + ) + + config, err := clientcmd.BuildConfigFromFlags("", configFile) + if err != nil { + t.Fatal(err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + + t.Run("Get available pods before job run", func(t *testing.T) { + expectedPodsCount := podCountsByType{1, 1, 0} + success := pollForClusterState(clientset, expectedPodsCount, 60) + if !success { + t.Fatal("Expected pods count did not match available pods count before job run.") + } + }, + ) + t.Run("Get available pods during job run", func(t *testing.T) { + c := http.Client{} + dateTime := os.Getenv("DATE_TIME") + // We are triggering manually a workflow that already exists in the repo. + // This workflow is expected to spin up a number of runner pods matching the runners value set in podCountsByType. + url := "https://api.github.com/repos/actions/actions-runner-controller/actions/workflows/e2e-test-dispatch-workflow.yaml/dispatches" + jsonStr := []byte(fmt.Sprintf(`{"ref":"master", "inputs":{"date_time":"%s"}}`, dateTime)) + + req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr)) + if err != nil { + t.Fatal(err) + } + ght := os.Getenv("GITHUB_TOKEN") + req.Header.Add("Accept", "application/vnd.github+json") + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ght)) + req.Header.Add("X-GitHub-Api-Version", "2022-11-28") + + resp, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + expectedPodsCount := podCountsByType{1, 1, 3} + success := pollForClusterState(clientset, expectedPodsCount, 120) + if !success { + t.Fatal("Expected pods count did not match available pods count during job run.") + } + + }, + ) + t.Run("Get available pods after job run", func(t *testing.T) { + expectedPodsCount := podCountsByType{1, 1, 0} + success := pollForClusterState(clientset, 
expectedPodsCount, 120) + if !success { + t.Fatal("Expected pods count did not match available pods count after job run.") + } + }, + ) +} From 6829f5e6b95052e2cf7a573f370262c59d6e0ee2 Mon Sep 17 00:00:00 2001 From: Ava Stancu Date: Mon, 27 Feb 2023 22:30:40 +0100 Subject: [PATCH 087/561] Added org for getting the workflow token job as it errored without (#2334) --- .github/workflows/e2e-test-linux-vm.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 4edee662a1..0c70e9bf4f 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -2,6 +2,9 @@ name: CI ARC E2E Linux VM Test on: workflow_dispatch: + +env: + TARGET_ORG: actions-runner-controller jobs: setup-steps: @@ -18,6 +21,7 @@ jobs: with: application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} + organization: ${{ env.TARGET_ORG }} - uses: ./.github/actions/e2e-arc-test with: github-token: ${{ steps.get_workflow_token.outputs.token }} From f4643353637cbc47ca18c005b07b1db133920eb9 Mon Sep 17 00:00:00 2001 From: Ava Stancu Date: Tue, 28 Feb 2023 20:26:01 +0100 Subject: [PATCH 088/561] Change e2e config url (#2338) --- .github/actions/e2e-arc-test/action.yaml | 5 ++++- .github/workflows/e2e-test-linux-vm.yaml | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/actions/e2e-arc-test/action.yaml b/.github/actions/e2e-arc-test/action.yaml index 24c4032f38..6883618a3e 100644 --- a/.github/actions/e2e-arc-test/action.yaml +++ b/.github/actions/e2e-arc-test/action.yaml @@ -5,6 +5,9 @@ inputs: github-token: description: 'JWT generated with Github App inputs' required: true + config-url: + description: "URL of the repo, org or enterprise where the runner scale sets will be registered" + required: true runs: using: "composite" @@ -22,7 +25,7 @@ runs: helm install "arc-runner-${{ env.DATE_TIME }}" \ 
--namespace "arc-runners" \ --create-namespace \ - --set githubConfigUrl="https://github.com/actions/actions-runner-controller" \ + --set githubConfigUrl="${{ inputs.config-url }}"\ --set githubConfigSecret.github_token="${{ inputs.github-token }}" \ ./charts/auto-scaling-runner-set \ --debug diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 0c70e9bf4f..09161e6943 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -25,3 +25,4 @@ jobs: - uses: ./.github/actions/e2e-arc-test with: github-token: ${{ steps.get_workflow_token.outputs.token }} + config-url: "https://github.com/actions-runner-controller/arc_e2e_test_dummy" From 2105f687f3c290975b92adfc4eeb3aa7c5436a14 Mon Sep 17 00:00:00 2001 From: Milas Bowman Date: Tue, 28 Feb 2023 17:18:13 -0500 Subject: [PATCH 089/561] Upgrade Docker Compose to v2.16.0 (#2327) --- runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile | 2 +- runner/actions-runner-dind.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner-dind.ubuntu-22.04.dockerfile | 2 +- runner/actions-runner.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner.ubuntu-22.04.dockerfile | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile index 9cb3b34379..1ee6f2b899 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile @@ -5,7 +5,7 @@ ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ENV CHANNEL=stable -ARG DOCKER_COMPOSE_VERSION=v2.6.0 +ARG DOCKER_COMPOSE_VERSION=v2.16.0 ARG DUMB_INIT_VERSION=1.2.5 # Other arguments diff --git a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile 
b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile index d91fc7ef07..b5f02d9088 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile @@ -5,7 +5,7 @@ ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ENV CHANNEL=stable -ARG DOCKER_COMPOSE_VERSION=v2.12.2 +ARG DOCKER_COMPOSE_VERSION=v2.16.0 ARG DUMB_INIT_VERSION=1.2.5 ARG RUNNER_USER_UID=1001 diff --git a/runner/actions-runner-dind.ubuntu-20.04.dockerfile b/runner/actions-runner-dind.ubuntu-20.04.dockerfile index f0ea6f07b8..fb83c166fe 100644 --- a/runner/actions-runner-dind.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-20.04.dockerfile @@ -6,7 +6,7 @@ ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.18 -ARG DOCKER_COMPOSE_VERSION=v2.6.0 +ARG DOCKER_COMPOSE_VERSION=v2.16.0 ARG DUMB_INIT_VERSION=1.2.5 # Use 1001 and 121 for compatibility with GitHub-hosted runners diff --git a/runner/actions-runner-dind.ubuntu-22.04.dockerfile b/runner/actions-runner-dind.ubuntu-22.04.dockerfile index 37f9c3e15d..65e0365856 100644 --- a/runner/actions-runner-dind.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-22.04.dockerfile @@ -6,7 +6,7 @@ ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.21 -ARG DOCKER_COMPOSE_VERSION=v2.12.2 +ARG DOCKER_COMPOSE_VERSION=v2.16.0 ARG DUMB_INIT_VERSION=1.2.5 ARG RUNNER_USER_UID=1001 ARG DOCKER_GROUP_GID=121 diff --git a/runner/actions-runner.ubuntu-20.04.dockerfile b/runner/actions-runner.ubuntu-20.04.dockerfile index 6017b0ec19..0339174fd9 100644 --- a/runner/actions-runner.ubuntu-20.04.dockerfile +++ b/runner/actions-runner.ubuntu-20.04.dockerfile @@ -6,7 +6,7 @@ ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable ARG 
DOCKER_VERSION=20.10.18 -ARG DOCKER_COMPOSE_VERSION=v2.6.0 +ARG DOCKER_COMPOSE_VERSION=v2.16.0 ARG DUMB_INIT_VERSION=1.2.5 # Use 1001 and 121 for compatibility with GitHub-hosted runners diff --git a/runner/actions-runner.ubuntu-22.04.dockerfile b/runner/actions-runner.ubuntu-22.04.dockerfile index 4150f77d9c..275601753d 100644 --- a/runner/actions-runner.ubuntu-22.04.dockerfile +++ b/runner/actions-runner.ubuntu-22.04.dockerfile @@ -6,7 +6,7 @@ ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.21 -ARG DOCKER_COMPOSE_VERSION=v2.12.2 +ARG DOCKER_COMPOSE_VERSION=v2.16.0 ARG DUMB_INIT_VERSION=1.2.5 ARG RUNNER_USER_UID=1001 ARG DOCKER_GROUP_GID=121 From 11104f4b5001029163c355e1c87553057494c789 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Mar 2023 07:19:47 +0900 Subject: [PATCH 090/561] chore(deps): bump sigs.k8s.io/controller-runtime from 0.14.1 to 0.14.4 (#2261) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Yusuke Kuoka --- Makefile | 97 +++++++++++++++++++++++++++++--------------------------- go.mod | 14 ++++---- go.sum | 32 +++++++------------ 3 files changed, 70 insertions(+), 73 deletions(-) diff --git a/Makefile b/Makefile index 595183384e..2902d82ff7 100644 --- a/Makefile +++ b/Makefile @@ -113,59 +113,64 @@ manifests-gen-crds: controller-gen yq for YAMLFILE in config/crd/bases/actions*.yaml; do \ $(YQ) '.spec.preserveUnknownFields = false' --inplace "$$YAMLFILE" ; \ done + make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-type + make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-map-keys + +manifests-gen-crds-fix: DELETE_KEY ?= +manifests-gen-crds-fix: #runners - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace 
config/crd/bases/actions.summerwind.dev_runners.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.dockerdContainerResources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.dockerdContainerResources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runners.yaml #runnerreplicasets - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml - $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.dockerdContainerResources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.dockerdContainerResources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml #runnerdeployments - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.dockerdContainerResources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml - $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.sidecarContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.dockerdContainerResources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml #runnersets - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.volumeClaimTemplates.items.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml - $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.volumeClaimTemplates.items.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.workVolumeClaimTemplate.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' 
--inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.summerwind.dev_runnersets.yaml #autoscalingrunnersets - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' 
--inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_autoscalingrunnersets.yaml #ehemeralrunnersets - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml - $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.ephemeralRunnerSpec.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunnersets.yaml # ephemeralrunners - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.containers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.initContainers.items.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml - $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.x-kubernetes-list-type)' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml + $(YQ) 
'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.ephemeralContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.containers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.initContainers.items.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml + $(YQ) 'del(.spec.versions[].schema.openAPIV3Schema.properties.spec.properties.spec.properties.volumes.items.properties.ephemeral.properties.volumeClaimTemplate.properties.spec.properties.resources.properties.claims.$(DELETE_KEY))' --inplace config/crd/bases/actions.github.com_ephemeralrunners.yaml chart-crds: cp config/crd/bases/*.yaml charts/actions-runner-controller/crds/ diff --git a/go.mod b/go.mod index 640c3216a6..c8987d710d 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/golang-jwt/jwt/v4 v4.4.1 github.com/google/go-cmp v0.5.9 github.com/google/go-github/v47 v47.1.0 + github.com/google/go-github/v50 v50.0.0 github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 @@ -29,10 +30,10 @@ require ( golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 gomodules.xyz/jsonpatch/v2 v2.2.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.26.0 - k8s.io/apimachinery v0.26.0 - k8s.io/client-go v0.26.0 - sigs.k8s.io/controller-runtime v0.14.1 + k8s.io/api v0.26.1 + k8s.io/apimachinery v0.26.1 + k8s.io/client-go v0.26.1 + sigs.k8s.io/controller-runtime v0.14.4 sigs.k8s.io/yaml v1.3.0 ) @@ -57,7 +58,6 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/google/gnostic 
v0.5.7-v3refs // indirect github.com/google/go-github/v45 v45.2.0 // indirect - github.com/google/go-github/v50 v50.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/gruntwork-io/go-commons v0.8.0 // indirect @@ -95,8 +95,8 @@ require ( google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.26.0 // indirect - k8s.io/component-base v0.26.0 // indirect + k8s.io/apiextensions-apiserver v0.26.1 // indirect + k8s.io/component-base v0.26.1 // indirect k8s.io/klog/v2 v2.80.1 // indirect k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect diff --git a/go.sum b/go.sum index 5d3393cf1e..2198739e27 100644 --- a/go.sum +++ b/go.sum @@ -450,8 +450,6 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -520,14 +518,10 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -537,8 +531,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -707,16 +699,16 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= -k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= -k8s.io/apiextensions-apiserver v0.26.0 h1:Gy93Xo1eg2ZIkNX/8vy5xviVSxwQulsnUdQ00nEdpDo= -k8s.io/apiextensions-apiserver v0.26.0/go.mod h1:7ez0LTiyW5nq3vADtK6C3kMESxadD51Bh6uz3JOlqWQ= -k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= -k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= -k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= -k8s.io/component-base v0.26.0 h1:0IkChOCohtDHttmKuz+EP3j3+qKmV55rM9gIFTXA7Vs= -k8s.io/component-base v0.26.0/go.mod h1:lqHwlfV1/haa14F/Z5Zizk5QmzaVf23nQzCwVOQpfC8= +k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= +k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= +k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= +k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM= +k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= +k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= +k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= +k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= +k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4= +k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU= k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= 
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= @@ -726,8 +718,8 @@ k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.14.1 h1:vThDes9pzg0Y+UbCPY3Wj34CGIYPgdmspPm2GIpxpzM= -sigs.k8s.io/controller-runtime v0.14.1/go.mod h1:GaRkrY8a7UZF0kqFFbUKG7n9ICiTY5T55P1RiE3UZlU= +sigs.k8s.io/controller-runtime v0.14.4 h1:Kd/Qgx5pd2XUL08eOV2vwIq3L9GhIbJ5Nxengbd4/0M= +sigs.k8s.io/controller-runtime v0.14.4/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= From 18f88dc2b740901cbacef1efb76ca98ac8ae7359 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Mar 2023 07:21:24 +0900 Subject: [PATCH 091/561] chore(deps): bump github.com/stretchr/testify from 1.8.0 to 1.8.2 (#2336) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index c8987d710d..c977395aee 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/onsi/gomega v1.25.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 - github.com/stretchr/testify v1.8.0 + github.com/stretchr/testify v1.8.2 github.com/teambition/rrule-go v1.8.0 go.uber.org/multierr v1.7.0 go.uber.org/zap v1.24.0 @@ -83,7 +83,7 @@ 
require ( github.com/prometheus/procfs v0.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.4.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect golang.org/x/crypto v0.1.0 // indirect diff --git a/go.sum b/go.sum index 2198739e27..56ef6bbc1b 100644 --- a/go.sum +++ b/go.sum @@ -341,8 +341,9 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -350,8 +351,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/teambition/rrule-go v1.8.0 h1:a/IX5s56hGkFF+nRlJUooZU/45OTeeldBGL29nDKIHw= github.com/teambition/rrule-go v1.8.0/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4= github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= From 861d897bcd419a872f5da796d7a717f5ddbd2bc1 Mon Sep 17 00:00:00 2001 From: dhawalseth Date: Tue, 28 Feb 2023 15:19:58 -0800 Subject: [PATCH 092/561] chart: Create actionsmetrics.secrets.yaml (#2208) Co-authored-by: Dhawal Seth Co-authored-by: Yusuke Kuoka --- acceptance/deploy.sh | 10 ++++++- .../templates/actionsmetrics.deployment.yaml | 12 ++++---- .../templates/actionsmetrics.secrets.yaml | 28 +++++++++++++++++++ .../templates/githubwebhook.deployment.yaml | 2 +- test/e2e/e2e_test.go | 3 ++ 5 files changed, 47 insertions(+), 8 deletions(-) create mode 100644 charts/actions-runner-controller/templates/actionsmetrics.secrets.yaml diff --git a/acceptance/deploy.sh b/acceptance/deploy.sh index 2a90b429c6..2b076010ca 100755 --- a/acceptance/deploy.sh +++ b/acceptance/deploy.sh @@ -35,7 +35,7 @@ else echo 'Skipped deploying secret "github-webhook-server". Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2 fi -if [ -n "${WEBHOOK_GITHUB_TOKEN}" ]; then +if [ -n "${WEBHOOK_GITHUB_TOKEN}" ] && [ -z "${CREATE_SECRETS_USING_HELM}" ]; then kubectl -n actions-runner-system delete secret \ actions-metrics-server || : kubectl -n actions-runner-system create secret generic \ @@ -69,6 +69,14 @@ if [ "${tool}" == "helm" ]; then flags+=( --set githubWebhookServer.logFormat=${LOG_FORMAT}) flags+=( --set actionsMetricsServer.logFormat=${LOG_FORMAT}) fi + if [ -n "${CREATE_SECRETS_USING_HELM}" ]; then + if [ -z "${WEBHOOK_GITHUB_TOKEN}" ]; then + echo 'Failed deploying secret "actions-metrics-server" using helm. Set WEBHOOK_GITHUB_TOKEN to deploy.' 
1>&2 + exit 1 + fi + flags+=( --set actionsMetricsServer.secret.create=true) + flags+=( --set actionsMetricsServer.secret.github_token=${WEBHOOK_GITHUB_TOKEN}) + fi set -vx diff --git a/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml b/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml index ebab267d10..5eac200262 100644 --- a/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml +++ b/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml @@ -15,7 +15,7 @@ spec: metadata: {{- with .Values.actionsMetricsServer.podAnnotations }} annotations: - kubectl.kubernetes.io/default-logs-container: "github-webhook-server" + kubectl.kubernetes.io/default-container: "actions-metrics-server" {{- toYaml . | nindent 8 }} {{- end }} labels: @@ -45,7 +45,7 @@ spec: {{- if .Values.runnerGithubURL }} - "--runner-github-url={{ .Values.runnerGithubURL }}" {{- end }} - {{- if .Values.actionsMetricsServer.logFormat }} + {{- if .Values.actionsMetricsServer.logFormat }} - "--log-format={{ .Values.actionsMetricsServer.logFormat }}" {{- end }} command: @@ -74,25 +74,25 @@ spec: valueFrom: secretKeyRef: key: github_token - name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }} + name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }} optional: true - name: GITHUB_APP_ID valueFrom: secretKeyRef: key: github_app_id - name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }} + name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }} optional: true - name: GITHUB_APP_INSTALLATION_ID valueFrom: secretKeyRef: key: github_app_installation_id - name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }} + name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . 
}} optional: true - name: GITHUB_APP_PRIVATE_KEY valueFrom: secretKeyRef: key: github_app_private_key - name: {{ include "actions-runner-controller.githubWebhookServerSecretName" . }} + name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }} optional: true {{- if .Values.authSecret.github_basicauth_username }} - name: GITHUB_BASICAUTH_USERNAME diff --git a/charts/actions-runner-controller/templates/actionsmetrics.secrets.yaml b/charts/actions-runner-controller/templates/actionsmetrics.secrets.yaml new file mode 100644 index 0000000000..a7128b4c31 --- /dev/null +++ b/charts/actions-runner-controller/templates/actionsmetrics.secrets.yaml @@ -0,0 +1,28 @@ +{{- if .Values.actionsMetricsServer.enabled }} +{{- if .Values.actionsMetricsServer.secret.create }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "actions-runner-controller.labels" . 
| nindent 4 }} +type: Opaque +data: +{{- if .Values.actionsMetricsServer.secret.github_webhook_secret_token }} + github_webhook_secret_token: {{ .Values.actionsMetricsServer.secret.github_webhook_secret_token | toString | b64enc }} +{{- end }} +{{- if .Values.actionsMetricsServer.secret.github_app_id }} + github_app_id: {{ .Values.actionsMetricsServer.secret.github_app_id | toString | b64enc }} +{{- end }} +{{- if .Values.actionsMetricsServer.secret.github_app_installation_id }} + github_app_installation_id: {{ .Values.actionsMetricsServer.secret.github_app_installation_id | toString | b64enc }} +{{- end }} +{{- if .Values.actionsMetricsServer.secret.github_app_private_key }} + github_app_private_key: {{ .Values.actionsMetricsServer.secret.github_app_private_key | toString | b64enc }} +{{- end }} +{{- if .Values.actionsMetricsServer.secret.github_token }} + github_token: {{ .Values.actionsMetricsServer.secret.github_token | toString | b64enc }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml b/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml index 86a4aacd8c..b119ff1ddf 100644 --- a/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml +++ b/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml @@ -51,7 +51,7 @@ spec: {{- if .Values.githubWebhookServer.queueLimit }} - "--queue-limit={{ .Values.githubWebhookServer.queueLimit }}" {{- end }} - {{- if .Values.githubWebhookServer.logFormat }} + {{- if .Values.githubWebhookServer.logFormat }} - "--log-format={{ .Values.githubWebhookServer.logFormat }}" {{- end }} command: diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 04e6810b28..9400381fc0 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -398,6 +398,7 @@ type env struct { appID, appInstallationID, appPrivateKeyFile string githubToken, testRepo, testOrg, testOrgRepo string githubTokenWebhook string + 
createSecretsUsingHelm string testEnterprise string testEphemeral string scaleDownDelaySecondsAfterScaleOut int64 @@ -533,6 +534,7 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env { e.appInstallationID = testing.Getenv(t, "GITHUB_APP_INSTALLATION_ID") e.appPrivateKeyFile = testing.Getenv(t, "GITHUB_APP_PRIVATE_KEY_FILE") e.githubTokenWebhook = testing.Getenv(t, "WEBHOOK_GITHUB_TOKEN") + e.createSecretsUsingHelm = testing.Getenv(t, "CREATE_SECRETS_USING_HELM") e.repoToCommit = testing.Getenv(t, "TEST_COMMIT_REPO") e.testRepo = testing.Getenv(t, "TEST_REPO", "") e.testOrg = testing.Getenv(t, "TEST_ORG", "") @@ -718,6 +720,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, ch varEnv := []string{ "WEBHOOK_GITHUB_TOKEN=" + e.githubTokenWebhook, + "CREATE_SECRETS_USING_HELM=" + e.createSecretsUsingHelm, "TEST_ID=" + testID, "NAME=" + repo, "VERSION=" + tag, From 11d540c9c065154ee97876787d1df99e7c00dc43 Mon Sep 17 00:00:00 2001 From: Alex Williams Date: Tue, 28 Feb 2023 23:27:37 +0000 Subject: [PATCH 093/561] Ensure that EffectiveTime is updated on webhook scale down (#2258) Co-authored-by: Yusuke Kuoka --- ...orizontal_runner_autoscaler_batch_scale.go | 42 +++++++++++++++++-- 1 file changed, 39 insertions(+), 3 deletions(-) diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go index d3914f10db..236317de17 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go @@ -157,8 +157,8 @@ func (s *batchScaler) batchScale(ctx context.Context, batch batchScaleOperation) scale.log.V(2).Info("Adding capacity reservation", "amount", amount) + now := time.Now() if amount > 0 { - now := time.Now() copy.Spec.CapacityReservations = append(copy.Spec.CapacityReservations, v1alpha1.CapacityReservation{ 
EffectiveTime: metav1.Time{Time: now}, ExpirationTime: metav1.Time{Time: now.Add(scale.trigger.Duration.Duration)}, @@ -169,12 +169,48 @@ func (s *batchScaler) batchScale(ctx context.Context, batch batchScaleOperation) } else if amount < 0 { var reservations []v1alpha1.CapacityReservation - var found bool + var ( + found bool + foundIdx int + ) - for _, r := range copy.Spec.CapacityReservations { + for i, r := range copy.Spec.CapacityReservations { + r := r if !found && r.Replicas+amount == 0 { found = true + foundIdx = i } else { + // Note that we nil-check max replicas because this "fix" is needed only when there is the upper limit of runners. + // In other words, you don't need to reset effective time and expiration time when there is no max replicas. + // That's because the desired replicas would already contain the reservation since it's creation. + if found && copy.Spec.MaxReplicas != nil && i > foundIdx+*copy.Spec.MaxReplicas { + // Update newer CapacityReservations' time to now to trigger reconcile + // Without this, we might stuck in minReplicas unnecessarily long. + // That is, we might not scale up after an ephemeral runner has been deleted + // until a new scale up, all runners finish, or after DefaultRunnerPodRecreationDelayAfterWebhookScale + // See https://github.com/actions/actions-runner-controller/issues/2254 for more context. + r.EffectiveTime = metav1.Time{Time: now} + + // We also reset the scale trigger expiration time, so that you don't need to tweak + // scale trigger duratoin depending on maxReplicas. + // A detailed explanation follows. + // + // Let's say maxReplicas=3 and the workflow job of status=canceled result in deleting the first capacity reservation hence i=0. 
+ // We are interested in at least four reservations and runners: + // i=0 - already included in the current desired replicas, but just got deleted + // i=1-2 - already included in the current desired replicas + // i=3 - not yet included in the current desired replicas, might have been expired while waiting in the queue + // + // i=3 is especially important here- If we didn't reset the expiration time of 3rd reservation, + // it might expire before a corresponding runner is created, due to the delay between the expiration timer starts and the runner is created. + // + // Why is there such delay? Because ARC implements the scale duration and expiration as such... + // The expiration timer starts when the reservation is created, while the runner is created only after the corresponding reservation fits within maxReplicas. + // + // We address that, by resetting the expiration time for fourth(i=3 in the above example) and subsequent reservations when the first reservation gets cancelled. + r.ExpirationTime = metav1.Time{Time: now.Add(scale.trigger.Duration.Duration)} + } + reservations = append(reservations, r) } } From 86bf7b6bd8442fdef55cd204ceda575429e54a5c Mon Sep 17 00:00:00 2001 From: Ava Stancu Date: Wed, 1 Mar 2023 10:43:17 +0100 Subject: [PATCH 094/561] Added space before backslash on the multi line command (#2340) --- .github/actions/e2e-arc-test/action.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/e2e-arc-test/action.yaml b/.github/actions/e2e-arc-test/action.yaml index 6883618a3e..568a9f2546 100644 --- a/.github/actions/e2e-arc-test/action.yaml +++ b/.github/actions/e2e-arc-test/action.yaml @@ -25,7 +25,7 @@ runs: helm install "arc-runner-${{ env.DATE_TIME }}" \ --namespace "arc-runners" \ --create-namespace \ - --set githubConfigUrl="${{ inputs.config-url }}"\ + --set githubConfigUrl="${{ inputs.config-url }}" \ --set githubConfigSecret.github_token="${{ inputs.github-token }}" \ ./charts/auto-scaling-runner-set \ 
--debug From 7572f9b242136a9c89b598d623b5d294d854e5e9 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Wed, 1 Mar 2023 13:16:03 +0100 Subject: [PATCH 095/561] Rename actions-runner-controller-2 and auto-scaling-runner-set helm charts (#2333) Co-authored-by: Ava S --- .github/actions/e2e-arc-test/action.yaml | 4 +- .github/workflows/publish-chart.yaml | 4 +- ...rc2.yaml => publish-runner-scale-set.yaml} | 56 ++--- Makefile | 8 +- .../templates/manager_role_binding.yaml | 12 - .../.helmignore | 0 .../Chart.yaml | 2 +- ...tions.github.com_autoscalinglisteners.yaml | 0 ...ions.github.com_autoscalingrunnersets.yaml | 0 .../actions.github.com_ephemeralrunners.yaml | 0 ...ctions.github.com_ephemeralrunnersets.yaml | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 38 ++-- .../templates/deployment.yaml | 14 +- .../templates/leader_election_role.yaml | 2 +- .../leader_election_role_binding.yaml | 6 +- .../templates/manager_role.yaml | 2 +- .../templates/manager_role_binding.yaml | 12 + .../templates/serviceaccount.yaml | 4 +- .../tests/template_test.go | 84 +++---- .../values.yaml | 6 +- .../.helmignore | 0 .../Chart.yaml | 2 +- .../ci/ci-values.yaml | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 50 ++--- .../templates/autoscalingrunnerset.yaml | 28 +-- .../templates/githubsecret.yaml | 4 +- .../templates/kube_mode_role.yaml | 2 +- .../templates/kube_mode_role_binding.yaml | 6 +- .../templates/kube_mode_serviceaccount.yaml | 4 +- .../no_permission_serviceaccount.yaml | 4 +- .../tests/template_test.go | 76 +++---- .../tests/values.yaml | 0 .../values.yaml | 2 +- .../actions-runner-controller-2/README.md | 212 +----------------- .../gha-runner-scale-set-controller/README.md | 211 +++++++++++++++++ test/platforms/aws-eks/README.md | 2 +- 38 files changed, 429 insertions(+), 428 deletions(-) rename .github/workflows/{publish-arc2.yaml => publish-runner-scale-set.yaml} (67%) delete mode 100644 
charts/actions-runner-controller-2/templates/manager_role_binding.yaml rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/.helmignore (100%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/Chart.yaml (97%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/crds/actions.github.com_autoscalinglisteners.yaml (100%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/crds/actions.github.com_autoscalingrunnersets.yaml (100%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/crds/actions.github.com_ephemeralrunners.yaml (100%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/crds/actions.github.com_ephemeralrunnersets.yaml (100%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/templates/NOTES.txt (100%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/templates/_helpers.tpl (58%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/templates/deployment.yaml (82%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/templates/leader_election_role.yaml (80%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/templates/leader_election_role_binding.yaml (55%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/templates/manager_role.yaml (96%) create mode 100644 charts/gha-runner-scale-set-controller/templates/manager_role_binding.yaml rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/templates/serviceaccount.yaml (62%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/tests/template_test.go (82%) rename charts/{actions-runner-controller-2 => gha-runner-scale-set-controller}/values.yaml (90%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/.helmignore (100%) rename 
charts/{auto-scaling-runner-set => gha-runner-scale-set}/Chart.yaml (97%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/ci/ci-values.yaml (100%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/templates/NOTES.txt (100%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/templates/_helpers.tpl (84%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/templates/autoscalingrunnerset.yaml (72%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/templates/githubsecret.yaml (92%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/templates/kube_mode_role.yaml (91%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/templates/kube_mode_role_binding.yaml (62%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/templates/kube_mode_serviceaccount.yaml (59%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/templates/no_permission_serviceaccount.yaml (58%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/tests/template_test.go (90%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/tests/values.yaml (100%) rename charts/{auto-scaling-runner-set => gha-runner-scale-set}/values.yaml (99%) create mode 100644 docs/preview/gha-runner-scale-set-controller/README.md diff --git a/.github/actions/e2e-arc-test/action.yaml b/.github/actions/e2e-arc-test/action.yaml index 568a9f2546..13f3586161 100644 --- a/.github/actions/e2e-arc-test/action.yaml +++ b/.github/actions/e2e-arc-test/action.yaml @@ -13,7 +13,7 @@ runs: using: "composite" steps: - name: Install ARC - run: helm install arc --namespace "arc-systems" --create-namespace ./charts/actions-runner-controller-2 + run: helm install arc --namespace "arc-systems" --create-namespace ./charts/gha-runner-scale-set-controller shell: bash - name: Get datetime # We are using this value further in the runner installation to avoid runner name collision that are a risk with hard coded values. 
@@ -27,7 +27,7 @@ runs: --create-namespace \ --set githubConfigUrl="${{ inputs.config-url }}" \ --set githubConfigSecret.github_token="${{ inputs.github-token }}" \ - ./charts/auto-scaling-runner-set \ + ./charts/gha-runner-scale-set \ --debug kubectl get pods -A shell: bash diff --git a/.github/workflows/publish-chart.yaml b/.github/workflows/publish-chart.yaml index 69646136d1..aaae2828ad 100644 --- a/.github/workflows/publish-chart.yaml +++ b/.github/workflows/publish-chart.yaml @@ -10,8 +10,8 @@ on: - 'charts/**' - '.github/workflows/publish-chart.yaml' - '!charts/actions-runner-controller/docs/**' - - '!charts/actions-runner-controller-2/**' - - '!charts/auto-scaling-runner-set/**' + - '!charts/gha-runner-scale-set-controller/**' + - '!charts/gha-runner-scale-set/**' - '!**.md' workflow_dispatch: diff --git a/.github/workflows/publish-arc2.yaml b/.github/workflows/publish-runner-scale-set.yaml similarity index 67% rename from .github/workflows/publish-arc2.yaml rename to .github/workflows/publish-runner-scale-set.yaml index 0b87ccd4dc..d32a3c0320 100644 --- a/.github/workflows/publish-arc2.yaml +++ b/.github/workflows/publish-runner-scale-set.yaml @@ -1,4 +1,4 @@ -name: Publish ARC 2 +name: Publish Runner Scale Set Controller Charts on: workflow_dispatch: @@ -18,13 +18,13 @@ on: required: true type: boolean default: false - publish_actions_runner_controller_2_chart: - description: 'Publish new helm chart for actions-runner-controller-2' + publish_gha_runner_scale_set_controller_chart: + description: 'Publish new helm chart for gha-runner-scale-set-controller' required: true type: boolean default: false - publish_auto_scaling_runner_set_chart: - description: 'Publish new helm chart for auto-scaling-runner-set' + publish_gha_runner_scale_set_chart: + description: 'Publish new helm chart for gha-runner-scale-set' required: true type: boolean default: false @@ -87,14 +87,14 @@ jobs: build-args: VERSION=${{ inputs.release_tag_name }} push: ${{ 
inputs.push_to_registries }} tags: | - ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-2:${{ inputs.release_tag_name }} - ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-2:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }} + ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }} + ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }} cache-from: type=gha cache-to: type=gha,mode=max - name: Job summary run: | - echo "The [publish-arc2](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/publish-arc2.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY + echo "The [publish-runner-scale-set.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/publish-runner-scale-set.yaml) workflow run was completed successfully!" 
>> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY @@ -103,10 +103,10 @@ jobs: echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY - publish-helm-chart-arc-2: - if: ${{ inputs.publish_actions_runner_controller_2_chart == true }} + publish-helm-chart-gha-runner-scale-set-controller: + if: ${{ inputs.publish_gha_runner_scale_set_controller_chart == true }} needs: build-push-image - name: Publish Helm chart for actions-runner-controller-2 + name: Publish Helm chart for gha-runner-scale-set-controller runs-on: ubuntu-latest steps: - name: Checkout @@ -133,27 +133,27 @@ jobs: with: version: ${{ env.HELM_VERSION }} - - name: Publish new helm chart for actions-runner-controller-2 + - name: Publish new helm chart for gha-runner-scale-set-controller run: | echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin - ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG=$(cat charts/actions-runner-controller-2/Chart.yaml | grep version: | cut -d " " -f 2) - echo "ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG=${ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG}" >> $GITHUB_ENV - helm package charts/actions-runner-controller-2/ --version="${ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG}" - helm push actions-runner-controller-2-"${ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts + GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set-controller/Chart.yaml | grep version: | cut -d " " -f 2) + echo "GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}" >> $GITHUB_ENV + helm package charts/gha-runner-scale-set-controller/ 
--version="${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}" + helm push gha-runner-scale-set-controller-"${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts - name: Job summary run: | - echo "New helm chart for actions-runner-controller-2 published successfully!" >> $GITHUB_STEP_SUMMARY + echo "New helm chart for gha-runner-scale-set-controller published successfully!" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY - echo "- Actions-Runner-Controller-2 Chart version: ${{ env.ACTIONS_RUNNER_CONTROLLER_2_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY + echo "- gha-runner-scale-set-controller Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY - publish-helm-chart-auto-scaling-runner-set: - if: ${{ inputs.publish_auto_scaling_runner_set_chart == true }} + publish-helm-chart-gha-runner-scale-set: + if: ${{ inputs.publish_gha_runner_scale_set_chart == true }} needs: build-push-image - name: Publish Helm chart for auto-scaling-runner-set + name: Publish Helm chart for gha-runner-scale-set runs-on: ubuntu-latest steps: - name: Checkout @@ -180,20 +180,20 @@ jobs: with: version: ${{ env.HELM_VERSION }} - - name: Publish new helm chart for auto-scaling-runner-set + - name: Publish new helm chart for gha-runner-scale-set run: | echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin - AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG=$(cat charts/auto-scaling-runner-set/Chart.yaml | grep version: | cut -d " " -f 2) - echo "AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG=${AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG}" >> $GITHUB_ENV - helm 
package charts/auto-scaling-runner-set/ --version="${AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG}" - helm push auto-scaling-runner-set-"${AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts + GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set/Chart.yaml | grep version: | cut -d " " -f 2) + echo "GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}" >> $GITHUB_ENV + helm package charts/gha-runner-scale-set/ --version="${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}" + helm push gha-runner-scale-set-"${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts - name: Job summary run: | - echo "New helm chart for auto-scaling-runner-set published successfully!" >> $GITHUB_STEP_SUMMARY + echo "New helm chart for gha-runner-scale-set published successfully!" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY - echo "- Auto-Scaling-Runner-Set Chart version: ${{ env.AUTO_SCALING_RUNNER_SET_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY + echo "- gha-runner-scale-set Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY diff --git a/Makefile b/Makefile index 2902d82ff7..7bb20d4263 100644 --- a/Makefile +++ b/Makefile @@ -174,10 +174,10 @@ manifests-gen-crds-fix: chart-crds: cp config/crd/bases/*.yaml charts/actions-runner-controller/crds/ - cp config/crd/bases/actions.github.com_autoscalingrunnersets.yaml charts/actions-runner-controller-2/crds/ - cp config/crd/bases/actions.github.com_autoscalinglisteners.yaml charts/actions-runner-controller-2/crds/ - cp 
config/crd/bases/actions.github.com_ephemeralrunnersets.yaml charts/actions-runner-controller-2/crds/ - cp config/crd/bases/actions.github.com_ephemeralrunners.yaml charts/actions-runner-controller-2/crds/ + cp config/crd/bases/actions.github.com_autoscalingrunnersets.yaml charts/gha-runner-scale-set-controller/crds/ + cp config/crd/bases/actions.github.com_autoscalinglisteners.yaml charts/gha-runner-scale-set-controller/crds/ + cp config/crd/bases/actions.github.com_ephemeralrunnersets.yaml charts/gha-runner-scale-set-controller/crds/ + cp config/crd/bases/actions.github.com_ephemeralrunners.yaml charts/gha-runner-scale-set-controller/crds/ rm charts/actions-runner-controller/crds/actions.github.com_autoscalingrunnersets.yaml rm charts/actions-runner-controller/crds/actions.github.com_autoscalinglisteners.yaml rm charts/actions-runner-controller/crds/actions.github.com_ephemeralrunnersets.yaml diff --git a/charts/actions-runner-controller-2/templates/manager_role_binding.yaml b/charts/actions-runner-controller-2/templates/manager_role_binding.yaml deleted file mode 100644 index cf8d6696f6..0000000000 --- a/charts/actions-runner-controller-2/templates/manager_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ include "actions-runner-controller-2.managerRoleBinding" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ include "actions-runner-controller-2.managerRoleName" . }} -subjects: -- kind: ServiceAccount - name: {{ include "actions-runner-controller-2.serviceAccountName" . 
}} - namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/charts/actions-runner-controller-2/.helmignore b/charts/gha-runner-scale-set-controller/.helmignore similarity index 100% rename from charts/actions-runner-controller-2/.helmignore rename to charts/gha-runner-scale-set-controller/.helmignore diff --git a/charts/actions-runner-controller-2/Chart.yaml b/charts/gha-runner-scale-set-controller/Chart.yaml similarity index 97% rename from charts/actions-runner-controller-2/Chart.yaml rename to charts/gha-runner-scale-set-controller/Chart.yaml index 5e7984a0bd..7016e65041 100644 --- a/charts/actions-runner-controller-2/Chart.yaml +++ b/charts/gha-runner-scale-set-controller/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -name: actions-runner-controller-2 +name: gha-runner-scale-set-controller description: A Helm chart for install actions-runner-controller CRD # A chart can be either an 'application' or a 'library' chart. diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_autoscalinglisteners.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalinglisteners.yaml similarity index 100% rename from charts/actions-runner-controller-2/crds/actions.github.com_autoscalinglisteners.yaml rename to charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalinglisteners.yaml diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml similarity index 100% rename from charts/actions-runner-controller-2/crds/actions.github.com_autoscalingrunnersets.yaml rename to charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunners.yaml similarity index 100% rename from 
charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunners.yaml rename to charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunners.yaml diff --git a/charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunnersets.yaml similarity index 100% rename from charts/actions-runner-controller-2/crds/actions.github.com_ephemeralrunnersets.yaml rename to charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunnersets.yaml diff --git a/charts/actions-runner-controller-2/templates/NOTES.txt b/charts/gha-runner-scale-set-controller/templates/NOTES.txt similarity index 100% rename from charts/actions-runner-controller-2/templates/NOTES.txt rename to charts/gha-runner-scale-set-controller/templates/NOTES.txt diff --git a/charts/actions-runner-controller-2/templates/_helpers.tpl b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl similarity index 58% rename from charts/actions-runner-controller-2/templates/_helpers.tpl rename to charts/gha-runner-scale-set-controller/templates/_helpers.tpl index 4b5ffaed0b..ce3409fa04 100644 --- a/charts/actions-runner-controller-2/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl @@ -1,7 +1,7 @@ {{/* Expand the name of the chart. */}} -{{- define "actions-runner-controller-2.name" -}} +{{- define "gha-runner-scale-set-controller.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} @@ -10,7 +10,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. 
*/}} -{{- define "actions-runner-controller-2.fullname" -}} +{{- define "gha-runner-scale-set-controller.fullname" -}} {{- if .Values.fullnameOverride }} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} @@ -26,16 +26,16 @@ If release name contains chart name it will be used as a full name. {{/* Create chart name and version as used by the chart label. */}} -{{- define "actions-runner-controller-2.chart" -}} +{{- define "gha-runner-scale-set-controller.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} -{{- define "actions-runner-controller-2.labels" -}} -helm.sh/chart: {{ include "actions-runner-controller-2.chart" . }} -{{ include "actions-runner-controller-2.selectorLabels" . }} +{{- define "gha-runner-scale-set-controller.labels" -}} +helm.sh/chart: {{ include "gha-runner-scale-set-controller.chart" . }} +{{ include "gha-runner-scale-set-controller.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} @@ -49,20 +49,20 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{/* Selector labels */}} -{{- define "actions-runner-controller-2.selectorLabels" -}} -app.kubernetes.io/name: {{ include "actions-runner-controller-2.name" . }} +{{- define "gha-runner-scale-set-controller.selectorLabels" -}} +app.kubernetes.io/name: {{ include "gha-runner-scale-set-controller.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* Create the name of the service account to use */}} -{{- define "actions-runner-controller-2.serviceAccountName" -}} +{{- define "gha-runner-scale-set-controller.serviceAccountName" -}} {{- if eq .Values.serviceAccount.name "default"}} {{- fail "serviceAccount.name cannot be set to 'default'" }} {{- end }} {{- if .Values.serviceAccount.create }} -{{- default (include "actions-runner-controller-2.fullname" .) 
.Values.serviceAccount.name }} +{{- default (include "gha-runner-scale-set-controller.fullname" .) .Values.serviceAccount.name }} {{- else }} {{- if not .Values.serviceAccount.name }} {{- fail "serviceAccount.name must be set if serviceAccount.create is false" }} @@ -72,23 +72,23 @@ Create the name of the service account to use {{- end }} {{- end }} -{{- define "actions-runner-controller-2.managerRoleName" -}} -{{- include "actions-runner-controller-2.fullname" . }}-manager-role +{{- define "gha-runner-scale-set-controller.managerRoleName" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-role {{- end }} -{{- define "actions-runner-controller-2.managerRoleBinding" -}} -{{- include "actions-runner-controller-2.fullname" . }}-manager-rolebinding +{{- define "gha-runner-scale-set-controller.managerRoleBinding" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-rolebinding {{- end }} -{{- define "actions-runner-controller-2.leaderElectionRoleName" -}} -{{- include "actions-runner-controller-2.fullname" . }}-leader-election-role +{{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election-role {{- end }} -{{- define "actions-runner-controller-2.leaderElectionRoleBinding" -}} -{{- include "actions-runner-controller-2.fullname" . }}-leader-election-rolebinding +{{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election-rolebinding {{- end }} -{{- define "actions-runner-controller-2.imagePullSecretsNames" -}} +{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}} {{- $names := list }} {{- range $k, $v := . 
}} {{- $names = append $names $v.name }} diff --git a/charts/actions-runner-controller-2/templates/deployment.yaml b/charts/gha-runner-scale-set-controller/templates/deployment.yaml similarity index 82% rename from charts/actions-runner-controller-2/templates/deployment.yaml rename to charts/gha-runner-scale-set-controller/templates/deployment.yaml index 0813f355ce..a35dc784f2 100644 --- a/charts/actions-runner-controller-2/templates/deployment.yaml +++ b/charts/gha-runner-scale-set-controller/templates/deployment.yaml @@ -1,15 +1,15 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "actions-runner-controller-2.fullname" . }} + name: {{ include "gha-runner-scale-set-controller.fullname" . }} namespace: {{ .Release.Namespace }} labels: - {{- include "actions-runner-controller-2.labels" . | nindent 4 }} + {{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }} spec: replicas: {{ default 1 .Values.replicaCount }} selector: matchLabels: - {{- include "actions-runner-controller-2.selectorLabels" . | nindent 6 }} + {{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 6 }} template: metadata: annotations: @@ -21,13 +21,13 @@ spec: app.kubernetes.io/part-of: actions-runner-controller app.kubernetes.io/component: controller-manager app.kubernetes.io/version: {{ .Chart.Version }} - {{- include "actions-runner-controller-2.selectorLabels" . | nindent 8 }} + {{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }} spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: {{ include "actions-runner-controller-2.serviceAccountName" . }} + serviceAccountName: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }} {{- with .Values.podSecurityContext }} securityContext: {{- toYaml . 
| nindent 8 }} @@ -43,10 +43,10 @@ spec: - "--auto-scaling-runner-set-only" {{- if gt (int (default 1 .Values.replicaCount)) 1 }} - "--enable-leader-election" - - "--leader-election-id={{ include "actions-runner-controller-2.fullname" . }}" + - "--leader-election-id={{ include "gha-runner-scale-set-controller.fullname" . }}" {{- end }} {{- with .Values.imagePullSecrets }} - - "--auto-scaler-image-pull-secrets={{ include "actions-runner-controller-2.imagePullSecretsNames" . }}" + - "--auto-scaler-image-pull-secrets={{ include "gha-runner-scale-set-controller.imagePullSecretsNames" . }}" {{- end }} {{- with .Values.flags.logLevel }} - "--log-level={{ . }}" diff --git a/charts/actions-runner-controller-2/templates/leader_election_role.yaml b/charts/gha-runner-scale-set-controller/templates/leader_election_role.yaml similarity index 80% rename from charts/actions-runner-controller-2/templates/leader_election_role.yaml rename to charts/gha-runner-scale-set-controller/templates/leader_election_role.yaml index 72c18189a6..a64906d369 100644 --- a/charts/actions-runner-controller-2/templates/leader_election_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/leader_election_role.yaml @@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "actions-runner-controller-2.leaderElectionRoleName" . }} + name: {{ include "gha-runner-scale-set-controller.leaderElectionRoleName" . 
}} namespace: {{ .Release.Namespace }} rules: - apiGroups: ["coordination.k8s.io"] diff --git a/charts/actions-runner-controller-2/templates/leader_election_role_binding.yaml b/charts/gha-runner-scale-set-controller/templates/leader_election_role_binding.yaml similarity index 55% rename from charts/actions-runner-controller-2/templates/leader_election_role_binding.yaml rename to charts/gha-runner-scale-set-controller/templates/leader_election_role_binding.yaml index 3ab4d9ee6d..b33dda68da 100644 --- a/charts/actions-runner-controller-2/templates/leader_election_role_binding.yaml +++ b/charts/gha-runner-scale-set-controller/templates/leader_election_role_binding.yaml @@ -2,14 +2,14 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ include "actions-runner-controller-2.leaderElectionRoleBinding" . }} + name: {{ include "gha-runner-scale-set-controller.leaderElectionRoleBinding" . }} namespace: {{ .Release.Namespace }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: {{ include "actions-runner-controller-2.leaderElectionRoleName" . }} + name: {{ include "gha-runner-scale-set-controller.leaderElectionRoleName" . }} subjects: - kind: ServiceAccount - name: {{ include "actions-runner-controller-2.serviceAccountName" . }} + name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} {{- end }} \ No newline at end of file diff --git a/charts/actions-runner-controller-2/templates/manager_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_role.yaml similarity index 96% rename from charts/actions-runner-controller-2/templates/manager_role.yaml rename to charts/gha-runner-scale-set-controller/templates/manager_role.yaml index 34639b9a2f..f51b47c4e2 100644 --- a/charts/actions-runner-controller-2/templates/manager_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_role.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "actions-runner-controller-2.managerRoleName" . }} + name: {{ include "gha-runner-scale-set-controller.managerRoleName" . }} rules: - apiGroups: - actions.github.com diff --git a/charts/gha-runner-scale-set-controller/templates/manager_role_binding.yaml b/charts/gha-runner-scale-set-controller/templates/manager_role_binding.yaml new file mode 100644 index 0000000000..72549d6ad1 --- /dev/null +++ b/charts/gha-runner-scale-set-controller/templates/manager_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "gha-runner-scale-set-controller.managerRoleBinding" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "gha-runner-scale-set-controller.managerRoleName" . }} +subjects: +- kind: ServiceAccount + name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/charts/actions-runner-controller-2/templates/serviceaccount.yaml b/charts/gha-runner-scale-set-controller/templates/serviceaccount.yaml similarity index 62% rename from charts/actions-runner-controller-2/templates/serviceaccount.yaml rename to charts/gha-runner-scale-set-controller/templates/serviceaccount.yaml index 0032039322..090d3a44f5 100644 --- a/charts/actions-runner-controller-2/templates/serviceaccount.yaml +++ b/charts/gha-runner-scale-set-controller/templates/serviceaccount.yaml @@ -2,10 +2,10 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ include "actions-runner-controller-2.serviceAccountName" . }} + name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }} namespace: {{ .Release.Namespace }} labels: - {{- include "actions-runner-controller-2.labels" . | nindent 4 }} + {{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }} {{- with .Values.serviceAccount.annotations }} annotations: {{- toYaml . 
| nindent 4 }} diff --git a/charts/actions-runner-controller-2/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go similarity index 82% rename from charts/actions-runner-controller-2/tests/template_test.go rename to charts/gha-runner-scale-set-controller/tests/template_test.go index 6c77fa3650..6f9c47d6e5 100644 --- a/charts/actions-runner-controller-2/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -26,7 +26,7 @@ func TestTemplate_CreateServiceAccount(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" @@ -46,7 +46,7 @@ func TestTemplate_CreateServiceAccount(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &serviceAccount) assert.Equal(t, namespaceName, serviceAccount.Namespace) - assert.Equal(t, "test-arc-actions-runner-controller-2", serviceAccount.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", serviceAccount.Name) assert.Equal(t, "bar", string(serviceAccount.Annotations["foo"])) } @@ -54,7 +54,7 @@ func TestTemplate_CreateServiceAccount_OverwriteName(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" @@ -83,7 +83,7 @@ func TestTemplate_CreateServiceAccount_CannotUseDefaultServiceAccount(t *testing t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" @@ -106,7 +106,7 @@ func TestTemplate_NotCreateServiceAccount(t *testing.T) { 
t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" @@ -129,7 +129,7 @@ func TestTemplate_NotCreateServiceAccount_ServiceAccountNotSet(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" @@ -151,7 +151,7 @@ func TestTemplate_CreateManagerRole(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" @@ -168,7 +168,7 @@ func TestTemplate_CreateManagerRole(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &managerRole) assert.Empty(t, managerRole.Namespace, "ClusterRole should not have a namespace") - assert.Equal(t, "test-arc-actions-runner-controller-2-manager-role", managerRole.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRole.Name) assert.Equal(t, 17, len(managerRole.Rules)) } @@ -176,7 +176,7 @@ func TestTemplate_ManagerRoleBinding(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" @@ -195,9 +195,9 @@ func TestTemplate_ManagerRoleBinding(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &managerRoleBinding) assert.Empty(t, managerRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace") - assert.Equal(t, "test-arc-actions-runner-controller-2-manager-rolebinding", 
managerRoleBinding.Name) - assert.Equal(t, "test-arc-actions-runner-controller-2-manager-role", managerRoleBinding.RoleRef.Name) - assert.Equal(t, "test-arc-actions-runner-controller-2", managerRoleBinding.Subjects[0].Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-rolebinding", managerRoleBinding.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRoleBinding.RoleRef.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerRoleBinding.Subjects[0].Name) assert.Equal(t, namespaceName, managerRoleBinding.Subjects[0].Namespace) } @@ -205,7 +205,7 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml")) @@ -231,25 +231,25 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &deployment) assert.Equal(t, namespaceName, deployment.Namespace) - assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Name) - assert.Equal(t, "actions-runner-controller-2-"+chart.Version, deployment.Labels["helm.sh/chart"]) - assert.Equal(t, "actions-runner-controller-2", deployment.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name) + assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"]) + assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) assert.Equal(t, int32(1), 
*deployment.Spec.Replicas) - assert.Equal(t, "actions-runner-controller-2", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) + assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"]) - assert.Equal(t, "actions-runner-controller-2", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"]) assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"]) assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0) - assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Spec.Template.Spec.ServiceAccountName) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName) assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext) assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName) assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds) @@ -263,7 +263,7 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name) - assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) @@ -291,7 +291,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { 
t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml")) @@ -312,9 +312,9 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { "image.pullPolicy": "Always", "image.tag": "dev", "imagePullSecrets[0].name": "dockerhub", - "nameOverride": "actions-runner-controller-2-override", - "fullnameOverride": "actions-runner-controller-2-fullname-override", - "serviceAccount.name": "actions-runner-controller-2-sa", + "nameOverride": "gha-runner-scale-set-controller-override", + "fullnameOverride": "gha-runner-scale-set-controller-fullname-override", + "serviceAccount.name": "gha-runner-scale-set-controller-sa", "podAnnotations.foo": "bar", "podSecurityContext.fsGroup": "1000", "securityContext.runAsUser": "1000", @@ -335,9 +335,9 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &deployment) assert.Equal(t, namespaceName, deployment.Namespace) - assert.Equal(t, "actions-runner-controller-2-fullname-override", deployment.Name) - assert.Equal(t, "actions-runner-controller-2-"+chart.Version, deployment.Labels["helm.sh/chart"]) - assert.Equal(t, "actions-runner-controller-2-override", deployment.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "gha-runner-scale-set-controller-fullname-override", deployment.Name) + assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"]) + assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) @@ -346,10 +346,10 @@ 
func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Equal(t, int32(1), *deployment.Spec.Replicas) - assert.Equal(t, "actions-runner-controller-2-override", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) + assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"]) - assert.Equal(t, "actions-runner-controller-2-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"]) assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"]) @@ -357,7 +357,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1) assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name) - assert.Equal(t, "actions-runner-controller-2-sa", deployment.Spec.Template.Spec.ServiceAccountName) + assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName) assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup) assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName) assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds) @@ -377,7 +377,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name) - assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", 
deployment.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, corev1.PullAlways, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) @@ -408,7 +408,7 @@ func TestTemplate_EnableLeaderElectionRole(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" @@ -426,7 +426,7 @@ func TestTemplate_EnableLeaderElectionRole(t *testing.T) { var leaderRole rbacv1.Role helm.UnmarshalK8SYaml(t, output, &leaderRole) - assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-role", leaderRole.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-role", leaderRole.Name) assert.Equal(t, namespaceName, leaderRole.Namespace) } @@ -434,7 +434,7 @@ func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" @@ -452,17 +452,17 @@ func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) { var leaderRoleBinding rbacv1.RoleBinding helm.UnmarshalK8SYaml(t, output, &leaderRoleBinding) - assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-rolebinding", leaderRoleBinding.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-rolebinding", leaderRoleBinding.Name) assert.Equal(t, namespaceName, leaderRoleBinding.Namespace) - assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-role", leaderRoleBinding.RoleRef.Name) - assert.Equal(t, "test-arc-actions-runner-controller-2", leaderRoleBinding.Subjects[0].Name) + assert.Equal(t, 
"test-arc-gha-runner-scale-set-controller-leader-election-role", leaderRoleBinding.RoleRef.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", leaderRoleBinding.Subjects[0].Name) } func TestTemplate_EnableLeaderElection(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" @@ -482,13 +482,13 @@ func TestTemplate_EnableLeaderElection(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &deployment) assert.Equal(t, namespaceName, deployment.Namespace) - assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name) assert.Equal(t, int32(2), *deployment.Spec.Replicas) assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name) - assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) @@ -497,7 +497,7 @@ func TestTemplate_EnableLeaderElection(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--enable-leader-election", deployment.Spec.Template.Spec.Containers[0].Args[1]) - assert.Equal(t, "--leader-election-id=test-arc-actions-runner-controller-2", deployment.Spec.Template.Spec.Containers[0].Args[2]) + assert.Equal(t, "--leader-election-id=test-arc-gha-runner-scale-set-controller", 
deployment.Spec.Template.Spec.Containers[0].Args[2]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[3]) } @@ -505,7 +505,7 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../actions-runner-controller-2") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") require.NoError(t, err) releaseName := "test-arc" diff --git a/charts/actions-runner-controller-2/values.yaml b/charts/gha-runner-scale-set-controller/values.yaml similarity index 90% rename from charts/actions-runner-controller-2/values.yaml rename to charts/gha-runner-scale-set-controller/values.yaml index 2dfaa27393..4e23294462 100644 --- a/charts/actions-runner-controller-2/values.yaml +++ b/charts/gha-runner-scale-set-controller/values.yaml @@ -1,15 +1,15 @@ -# Default values for actions-runner-controller-2. +# Default values for gha-runner-scale-set-controller. # This is a YAML-formatted file. # Declare variables to be passed into your templates. labels: {} # leaderElection will be enabled when replicaCount>1, # So, only one replica will in charge of reconciliation at a given time -# leaderElectionId will be set to {{ define actions-runner-controller-2.fullname }}. +# leaderElectionId will be set to {{ define gha-runner-scale-set-controller.fullname }}. replicaCount: 1 image: - repository: "ghcr.io/actions/actions-runner-controller-2" + repository: "ghcr.io/actions/gha-runner-scale-set-controller" pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. 
tag: "" diff --git a/charts/auto-scaling-runner-set/.helmignore b/charts/gha-runner-scale-set/.helmignore similarity index 100% rename from charts/auto-scaling-runner-set/.helmignore rename to charts/gha-runner-scale-set/.helmignore diff --git a/charts/auto-scaling-runner-set/Chart.yaml b/charts/gha-runner-scale-set/Chart.yaml similarity index 97% rename from charts/auto-scaling-runner-set/Chart.yaml rename to charts/gha-runner-scale-set/Chart.yaml index 0de198886e..0349000d08 100644 --- a/charts/auto-scaling-runner-set/Chart.yaml +++ b/charts/gha-runner-scale-set/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -name: auto-scaling-runner-set +name: gha-runner-scale-set description: A Helm chart for deploying an AutoScalingRunnerSet # A chart can be either an 'application' or a 'library' chart. diff --git a/charts/auto-scaling-runner-set/ci/ci-values.yaml b/charts/gha-runner-scale-set/ci/ci-values.yaml similarity index 100% rename from charts/auto-scaling-runner-set/ci/ci-values.yaml rename to charts/gha-runner-scale-set/ci/ci-values.yaml diff --git a/charts/auto-scaling-runner-set/templates/NOTES.txt b/charts/gha-runner-scale-set/templates/NOTES.txt similarity index 100% rename from charts/auto-scaling-runner-set/templates/NOTES.txt rename to charts/gha-runner-scale-set/templates/NOTES.txt diff --git a/charts/auto-scaling-runner-set/templates/_helpers.tpl b/charts/gha-runner-scale-set/templates/_helpers.tpl similarity index 84% rename from charts/auto-scaling-runner-set/templates/_helpers.tpl rename to charts/gha-runner-scale-set/templates/_helpers.tpl index 3b51df2a80..944bf97926 100644 --- a/charts/auto-scaling-runner-set/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set/templates/_helpers.tpl @@ -1,7 +1,7 @@ {{/* Expand the name of the chart. 
*/}} -{{- define "auto-scaling-runner-set.name" -}} +{{- define "gha-runner-scale-set.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} @@ -10,7 +10,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} -{{- define "auto-scaling-runner-set.fullname" -}} +{{- define "gha-runner-scale-set.fullname" -}} {{- if .Values.fullnameOverride }} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} @@ -26,16 +26,16 @@ If release name contains chart name it will be used as a full name. {{/* Create chart name and version as used by the chart label. */}} -{{- define "auto-scaling-runner-set.chart" -}} +{{- define "gha-runner-scale-set.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} -{{- define "auto-scaling-runner-set.labels" -}} -helm.sh/chart: {{ include "auto-scaling-runner-set.chart" . }} -{{ include "auto-scaling-runner-set.selectorLabels" . }} +{{- define "gha-runner-scale-set.labels" -}} +helm.sh/chart: {{ include "gha-runner-scale-set.chart" . }} +{{ include "gha-runner-scale-set.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} @@ -45,12 +45,12 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{/* Selector labels */}} -{{- define "auto-scaling-runner-set.selectorLabels" -}} -app.kubernetes.io/name: {{ include "auto-scaling-runner-set.name" . }} +{{- define "gha-runner-scale-set.selectorLabels" -}} +app.kubernetes.io/name: {{ include "gha-runner-scale-set.name" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} -{{- define "auto-scaling-runner-set.githubsecret" -}} +{{- define "gha-runner-scale-set.githubsecret" -}} {{- if kindIs "string" .Values.githubConfigSecret }} {{- if not (empty .Values.githubConfigSecret) }} {{- .Values.githubConfigSecret }} @@ -58,23 +58,23 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- fail "Values.githubConfigSecret is required for setting auth with GitHub server." }} {{- end }} {{- else }} -{{- include "auto-scaling-runner-set.fullname" . }}-github-secret +{{- include "gha-runner-scale-set.fullname" . }}-github-secret {{- end }} {{- end }} -{{- define "auto-scaling-runner-set.noPermissionServiceAccountName" -}} -{{- include "auto-scaling-runner-set.fullname" . }}-no-permission-service-account +{{- define "gha-runner-scale-set.noPermissionServiceAccountName" -}} +{{- include "gha-runner-scale-set.fullname" . }}-no-permission-service-account {{- end }} -{{- define "auto-scaling-runner-set.kubeModeRoleName" -}} -{{- include "auto-scaling-runner-set.fullname" . }}-kube-mode-role +{{- define "gha-runner-scale-set.kubeModeRoleName" -}} +{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role {{- end }} -{{- define "auto-scaling-runner-set.kubeModeServiceAccountName" -}} -{{- include "auto-scaling-runner-set.fullname" . }}-kube-mode-service-account +{{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}} +{{- include "gha-runner-scale-set.fullname" . 
}}-kube-mode-service-account {{- end }} -{{- define "auto-scaling-runner-set.dind-init-container" -}} +{{- define "gha-runner-scale-set.dind-init-container" -}} {{- range $i, $val := .Values.template.spec.containers -}} {{- if eq $val.name "runner" -}} image: {{ $val.image }} @@ -91,7 +91,7 @@ volumeMounts: {{- end }} {{- end }} -{{- define "auto-scaling-runner-set.dind-container" -}} +{{- define "gha-runner-scale-set.dind-container" -}} image: docker:dind securityContext: privileged: true @@ -104,14 +104,14 @@ volumeMounts: mountPath: /actions-runner/externals {{- end }} -{{- define "auto-scaling-runner-set.dind-volume" -}} +{{- define "gha-runner-scale-set.dind-volume" -}} - name: dind-cert emptyDir: {} - name: dind-externals emptyDir: {} {{- end }} -{{- define "auto-scaling-runner-set.dind-work-volume" -}} +{{- define "gha-runner-scale-set.dind-work-volume" -}} {{- $createWorkVolume := 1 }} {{- range $i, $volume := .Values.template.spec.volumes }} {{- if eq $volume.name "work" }} @@ -130,7 +130,7 @@ volumeMounts: {{- end }} {{- end }} -{{- define "auto-scaling-runner-set.kubernetes-mode-work-volume" -}} +{{- define "gha-runner-scale-set.kubernetes-mode-work-volume" -}} {{- $createWorkVolume := 1 }} {{- range $i, $volume := .Values.template.spec.volumes }} {{- if eq $volume.name "work" }} @@ -152,7 +152,7 @@ volumeMounts: {{- end }} {{- end }} -{{- define "auto-scaling-runner-set.non-work-volumes" -}} +{{- define "gha-runner-scale-set.non-work-volumes" -}} {{- range $i, $volume := .Values.template.spec.volumes }} {{- if ne $volume.name "work" }} - name: {{ $volume.name }} @@ -165,7 +165,7 @@ volumeMounts: {{- end }} {{- end }} -{{- define "auto-scaling-runner-set.non-runner-containers" -}} +{{- define "gha-runner-scale-set.non-runner-containers" -}} {{- range $i, $container := .Values.template.spec.containers -}} {{- if ne $container.name "runner" -}} - name: {{ $container.name }} @@ -178,7 +178,7 @@ volumeMounts: {{- end }} {{- end }} -{{- define 
"auto-scaling-runner-set.dind-runner-container" -}} +{{- define "gha-runner-scale-set.dind-runner-container" -}} {{- range $i, $container := .Values.template.spec.containers -}} {{- if eq $container.name "runner" -}} {{- range $key, $val := $container }} @@ -261,7 +261,7 @@ volumeMounts: {{- end }} {{- end }} -{{- define "auto-scaling-runner-set.kubernetes-mode-runner-container" -}} +{{- define "gha-runner-scale-set.kubernetes-mode-runner-container" -}} {{- range $i, $container := .Values.template.spec.containers -}} {{- if eq $container.name "runner" -}} {{- range $key, $val := $container }} diff --git a/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml similarity index 72% rename from charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml rename to charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index d12c886a27..d01f8cd966 100644 --- a/charts/auto-scaling-runner-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -4,10 +4,10 @@ metadata: name: {{ .Release.Name }} namespace: {{ .Release.Namespace }} labels: - {{- include "auto-scaling-runner-set.labels" . | nindent 4 }} + {{- include "gha-runner-scale-set.labels" . | nindent 4 }} spec: githubConfigUrl: {{ required ".Values.githubConfigUrl is required" .Values.githubConfigUrl }} - githubConfigSecret: {{ include "auto-scaling-runner-set.githubsecret" . }} + githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }} {{- with .Values.runnerGroup }} runnerGroup: {{ . }} {{- end }} @@ -68,15 +68,15 @@ spec: {{- end }} {{- end }} {{- if eq .Values.containerMode.type "kubernetes" }} - serviceAccountName: {{ default (include "auto-scaling-runner-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }} + serviceAccountName: {{ default (include "gha-runner-scale-set.kubeModeServiceAccountName" .) 
.Values.template.spec.serviceAccountName }} {{- else }} - serviceAccountName: {{ default (include "auto-scaling-runner-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }} + serviceAccountName: {{ default (include "gha-runner-scale-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }} {{- end }} {{- if or .Values.template.spec.initContainers (eq .Values.containerMode.type "dind") }} initContainers: {{- if eq .Values.containerMode.type "dind" }} - name: init-dind-externals - {{- include "auto-scaling-runner-set.dind-init-container" . | nindent 8 }} + {{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }} {{- end }} {{- with .Values.template.spec.initContainers }} {{- toYaml . | nindent 8 }} @@ -85,24 +85,24 @@ spec: containers: {{- if eq .Values.containerMode.type "dind" }} - name: runner - {{- include "auto-scaling-runner-set.dind-runner-container" . | nindent 8 }} + {{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }} - name: dind - {{- include "auto-scaling-runner-set.dind-container" . | nindent 8 }} - {{- include "auto-scaling-runner-set.non-runner-containers" . | nindent 6 }} + {{- include "gha-runner-scale-set.dind-container" . | nindent 8 }} + {{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }} {{- else if eq .Values.containerMode.type "kubernetes" }} - name: runner - {{- include "auto-scaling-runner-set.kubernetes-mode-runner-container" . | nindent 8 }} - {{- include "auto-scaling-runner-set.non-runner-containers" . | nindent 6 }} + {{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }} + {{- include "gha-runner-scale-set.non-runner-containers" . 
| nindent 6 }} {{- else }} {{ .Values.template.spec.containers | toYaml | nindent 6 }} {{- end }} {{- if or .Values.template.spec.volumes (eq .Values.containerMode.type "dind") (eq .Values.containerMode.type "kubernetes") }} volumes: {{- if eq .Values.containerMode.type "dind" }} - {{- include "auto-scaling-runner-set.dind-volume" . | nindent 6 }} - {{- include "auto-scaling-runner-set.dind-work-volume" . | nindent 6 }} + {{- include "gha-runner-scale-set.dind-volume" . | nindent 6 }} + {{- include "gha-runner-scale-set.dind-work-volume" . | nindent 6 }} {{- else if eq .Values.containerMode.type "kubernetes" }} - {{- include "auto-scaling-runner-set.kubernetes-mode-work-volume" . | nindent 6 }} + {{- include "gha-runner-scale-set.kubernetes-mode-work-volume" . | nindent 6 }} {{- end }} - {{- include "auto-scaling-runner-set.non-work-volumes" . | nindent 6 }} + {{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }} {{- end }} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/templates/githubsecret.yaml b/charts/gha-runner-scale-set/templates/githubsecret.yaml similarity index 92% rename from charts/auto-scaling-runner-set/templates/githubsecret.yaml rename to charts/gha-runner-scale-set/templates/githubsecret.yaml index 4374f8335d..03411486ac 100644 --- a/charts/auto-scaling-runner-set/templates/githubsecret.yaml +++ b/charts/gha-runner-scale-set/templates/githubsecret.yaml @@ -2,10 +2,10 @@ apiVersion: v1 kind: Secret metadata: - name: {{ include "auto-scaling-runner-set.githubsecret" . }} + name: {{ include "gha-runner-scale-set.githubsecret" . }} namespace: {{ .Release.Namespace }} labels: - {{- include "auto-scaling-runner-set.labels" . | nindent 4 }} + {{- include "gha-runner-scale-set.labels" . 
| nindent 4 }} finalizers: - actions.github.com/secret-protection data: diff --git a/charts/auto-scaling-runner-set/templates/kube_mode_role.yaml b/charts/gha-runner-scale-set/templates/kube_mode_role.yaml similarity index 91% rename from charts/auto-scaling-runner-set/templates/kube_mode_role.yaml rename to charts/gha-runner-scale-set/templates/kube_mode_role.yaml index a12d02d585..ffc0c68e75 100644 --- a/charts/auto-scaling-runner-set/templates/kube_mode_role.yaml +++ b/charts/gha-runner-scale-set/templates/kube_mode_role.yaml @@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "auto-scaling-runner-set.kubeModeRoleName" . }} + name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }} namespace: {{ .Release.Namespace }} rules: - apiGroups: [""] diff --git a/charts/auto-scaling-runner-set/templates/kube_mode_role_binding.yaml b/charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml similarity index 62% rename from charts/auto-scaling-runner-set/templates/kube_mode_role_binding.yaml rename to charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml index eafdda5e73..bd1c634db7 100644 --- a/charts/auto-scaling-runner-set/templates/kube_mode_role_binding.yaml +++ b/charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml @@ -2,14 +2,14 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ include "auto-scaling-runner-set.kubeModeRoleName" . }} + name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }} namespace: {{ .Release.Namespace }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: {{ include "auto-scaling-runner-set.kubeModeRoleName" . }} + name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }} subjects: - kind: ServiceAccount - name: {{ include "auto-scaling-runner-set.kubeModeServiceAccountName" . }} + name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . 
}} namespace: {{ .Release.Namespace }} {{- end }} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/templates/kube_mode_serviceaccount.yaml b/charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml similarity index 59% rename from charts/auto-scaling-runner-set/templates/kube_mode_serviceaccount.yaml rename to charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml index 60dc0a97c9..8f180f71bd 100644 --- a/charts/auto-scaling-runner-set/templates/kube_mode_serviceaccount.yaml +++ b/charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml @@ -2,8 +2,8 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ include "auto-scaling-runner-set.kubeModeServiceAccountName" . }} + name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }} namespace: {{ .Release.Namespace }} labels: - {{- include "auto-scaling-runner-set.labels" . | nindent 4 }} + {{- include "gha-runner-scale-set.labels" . | nindent 4 }} {{- end }} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/templates/no_permission_serviceaccount.yaml b/charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml similarity index 58% rename from charts/auto-scaling-runner-set/templates/no_permission_serviceaccount.yaml rename to charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml index 07608bc9cf..3aa2d0e277 100644 --- a/charts/auto-scaling-runner-set/templates/no_permission_serviceaccount.yaml +++ b/charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml @@ -2,8 +2,8 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ include "auto-scaling-runner-set.noPermissionServiceAccountName" . }} + name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }} namespace: {{ .Release.Namespace }} labels: - {{- include "auto-scaling-runner-set.labels" . | nindent 4 }} + {{- include "gha-runner-scale-set.labels" . 
| nindent 4 }} {{- end }} \ No newline at end of file diff --git a/charts/auto-scaling-runner-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go similarity index 90% rename from charts/auto-scaling-runner-set/tests/template_test.go rename to charts/gha-runner-scale-set/tests/template_test.go index 96a8e894ab..a26e571a6c 100644 --- a/charts/auto-scaling-runner-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -19,7 +19,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -39,7 +39,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &githubSecret) assert.Equal(t, namespaceName, githubSecret.Namespace) - assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", githubSecret.Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", githubSecret.Name) assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"])) assert.Equal(t, "actions.github.com/secret-protection", githubSecret.Finalizers[0]) } @@ -48,7 +48,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -79,7 +79,7 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAuthInput(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := 
"test-runners" @@ -104,7 +104,7 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -128,7 +128,7 @@ func TestTemplateNotRenderedGitHubSecretWithPredefinedSecret(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -150,7 +150,7 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -169,20 +169,20 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &serviceAccount) assert.Equal(t, namespaceName, serviceAccount.Namespace) - assert.Equal(t, "test-runners-auto-scaling-runner-set-no-permission-service-account", serviceAccount.Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", serviceAccount.Name) output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) var ars v1alpha1.AutoscalingRunnerSet helm.UnmarshalK8SYaml(t, output, &ars) - assert.Equal(t, "test-runners-auto-scaling-runner-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName) + assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName) } func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { t.Parallel() // Path to 
the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -202,14 +202,14 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &serviceAccount) assert.Equal(t, namespaceName, serviceAccount.Namespace) - assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-service-account", serviceAccount.Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name) output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"}) var role rbacv1.Role helm.UnmarshalK8SYaml(t, output, &role) assert.Equal(t, namespaceName, role.Namespace) - assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-role", role.Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name) assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules") assert.Equal(t, "pods", role.Rules[0].Resources[0]) assert.Equal(t, "pods/exec", role.Rules[1].Resources[0]) @@ -222,25 +222,25 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &roleBinding) assert.Equal(t, namespaceName, roleBinding.Namespace) - assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-role", roleBinding.Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.Name) assert.Len(t, roleBinding.Subjects, 1) - assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-service-account", roleBinding.Subjects[0].Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name) assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace) - assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-role", roleBinding.RoleRef.Name) + 
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name) assert.Equal(t, "Role", roleBinding.RoleRef.Kind) output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) var ars v1alpha1.AutoscalingRunnerSet helm.UnmarshalK8SYaml(t, output, &ars) - assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName) + assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName) } func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -269,7 +269,7 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -291,10 +291,10 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) { assert.Equal(t, namespaceName, ars.Namespace) assert.Equal(t, "test-runners", ars.Name) - assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) - assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", ars.Spec.GitHubConfigSecret) + assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret) assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") @@ -314,7 +314,7 @@ 
func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -359,7 +359,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MaxRunnersValidationError(t *testi t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -384,7 +384,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinRunnersValidationError(t *testi t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -410,7 +410,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationError(t *te t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -436,7 +436,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationSameValue(t t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -465,7 +465,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMin(t t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") 
require.NoError(t, err) releaseName := "test-runners" @@ -493,7 +493,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -521,7 +521,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunners_FromValuesFile(t *te t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) testValuesPath, err := filepath.Abs("../tests/values.yaml") @@ -548,7 +548,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -571,10 +571,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { assert.Equal(t, namespaceName, ars.Namespace) assert.Equal(t, "test-runners", ars.Name) - assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) - assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", ars.Spec.GitHubConfigSecret) + assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret) assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") @@ -631,7 +631,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) t.Parallel() // Path to the 
helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -654,10 +654,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) assert.Equal(t, namespaceName, ars.Namespace) assert.Equal(t, "test-runners", ars.Name) - assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) - assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", ars.Spec.GitHubConfigSecret) + assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret) assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil") @@ -686,7 +686,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T) t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -708,7 +708,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T) assert.Equal(t, namespaceName, ars.Namespace) assert.Equal(t, "test-runners", ars.Name) - assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) assert.Equal(t, "pre-defined-secrets", ars.Spec.GitHubConfigSecret) @@ -718,7 +718,7 @@ func 
TestTemplateRenderedAutoScalingRunnerSet_ErrorOnEmptyPredefinedSecret(t *te t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" @@ -742,7 +742,7 @@ func TestTemplateRenderedWithProxy(t *testing.T) { t.Parallel() // Path to the helm chart we will test - helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set") + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") require.NoError(t, err) releaseName := "test-runners" diff --git a/charts/auto-scaling-runner-set/tests/values.yaml b/charts/gha-runner-scale-set/tests/values.yaml similarity index 100% rename from charts/auto-scaling-runner-set/tests/values.yaml rename to charts/gha-runner-scale-set/tests/values.yaml diff --git a/charts/auto-scaling-runner-set/values.yaml b/charts/gha-runner-scale-set/values.yaml similarity index 99% rename from charts/auto-scaling-runner-set/values.yaml rename to charts/gha-runner-scale-set/values.yaml index 0e3b10be25..f3bbc21476 100644 --- a/charts/auto-scaling-runner-set/values.yaml +++ b/charts/gha-runner-scale-set/values.yaml @@ -13,7 +13,7 @@ githubConfigSecret: ### GitHub PAT Configuration github_token: "" -## If you have a pre-define Kubernetes secret in the same namespace the auto-scaling-runner-set is going to deploy, +## If you have a pre-defined Kubernetes secret in the same namespace the gha-runner-scale-set is going to deploy, ## you can also reference it via `githubConfigSecret: pre-defined-secret`. ## You need to make sure your predefined secret has all the required secret data set properly.
## For a pre-defined secret using GitHub PAT, the secret needs to be created like this: diff --git a/docs/preview/actions-runner-controller-2/README.md b/docs/preview/actions-runner-controller-2/README.md index ec74acbdd9..9e192df0b0 100644 --- a/docs/preview/actions-runner-controller-2/README.md +++ b/docs/preview/actions-runner-controller-2/README.md @@ -1,211 +1 @@ -# Autoscaling Runner Scale Sets mode - -**⚠️ This mode is currently only available for a limited number of organizations.** - -This new autoscaling mode brings numerous enhancements (described in the following sections) that will make your experience more reliable and secure. - -## How it works - -![arc_hld_v1 drawio (1)](https://user-images.githubusercontent.com/568794/212665433-2d1f3d6e-0ba8-4f02-9d1b-27d00c49abd1.png) - -In addition to the increased reliability of the automatic scaling, we have worked on these improvements: - -- No longer require cert-manager as a prerequisite for installing actions-runner-controller -- Reliable scale-up based on job demands and scale-down to zero runner pods -- Reduce API requests to `api.github.com`, no more API rate-limiting problems -- The GitHub Personal Access Token (PAT) or the GitHub App installation token is no longer passed to the runner pod for runner registration -- Maximum flexibility for customizing your runner pod template - -### Demo - -https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a73e-27f5e8c75720.mp4 - -## Setup - -### Prerequisites - -1. Create a K8s cluster, if not available. - - If you don't have a K8s cluster, you can install a local environment using minikube. See [installing minikube](https://minikube.sigs.k8s.io/docs/start/). -1. Install helm 3, if not available. See [installing Helm](https://helm.sh/docs/intro/install/). - -### Install actions-runner-controller - -1. Install actions-runner-controller using helm 3. 
For additional configuration options, see [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/actions-runner-controller-2/values.yaml) - - ```bash - NAMESPACE="arc-systems" - helm install arc \ - --namespace "${NAMESPACE}" \ - --create-namespace \ - oci://ghcr.io/actions/actions-runner-controller-charts/actions-runner-controller-2 \ - --version 0.2.0 - ``` - -1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app). - - ℹ For the list of required permissions, see [Authenticating to the GitHub API](https://github.com/actions/actions-runner-controller/blob/master/docs/authenticating-to-the-github-api.md#authenticating-to-the-github-api). - -1. You're ready to install the autoscaling runner set. For additional configuration options, see [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/auto-scaling-runner-set/values.yaml) - - ℹ **Choose your installation name carefully**, you will use it as the value of `runs-on` in your workflow. - - ℹ **We recommend you choose a unique namespace in the following steps**. As a good security measure, it's best to have your runner pods created in a different namespace than the one containing the manager and listener pods. 
- - ```bash - # Using a Personal Access Token (PAT) - INSTALLATION_NAME="arc-runner-set" - NAMESPACE="arc-runners" - GITHUB_CONFIG_URL="https://github.com/" - GITHUB_PAT="" - helm install "${INSTALLATION_NAME}" \ - --namespace "${NAMESPACE}" \ - --create-namespace \ - --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ - --set githubConfigSecret.github_token="${GITHUB_PAT}" \ - oci://ghcr.io/actions/actions-runner-controller-charts/auto-scaling-runner-set --version 0.2.0 - ``` - - ```bash - # Using a GitHub App - INSTALLATION_NAME="arc-runner-set" - NAMESPACE="arc-runners" - GITHUB_CONFIG_URL="https://github.com/" - GITHUB_APP_ID="" - GITHUB_APP_INSTALLATION_ID="" - GITHUB_APP_PRIVATE_KEY="" - helm install arc-runner-set \ - --namespace "${NAMESPACE}" \ - --create-namespace \ - --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ - --set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \ - --set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \ - --set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \ - oci://ghcr.io/actions/actions-runner-controller-charts/auto-scaling-runner-set --version 0.2.0 - ``` - -1. Check your installation. If everything went well, you should see the following: - - ```bash - $ helm list -n "${NAMESPACE}" - - NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION - arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed actions-runner-controller-2-0.2.0 preview - arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed auto-scaling-runner-set-0.2.0 0.2.0 - ``` - - ```bash - $ kubectl get pods -n "${NAMESPACE}" - - NAME READY STATUS RESTARTS AGE - arc-actions-runner-controller-2-8c74b6f95-gr7zr 1/1 Running 0 20m - arc-runner-set-6cd58d58-listener 1/1 Running 0 21s - ``` - -1. In a repository, create a simple test workflow as follows. The `runs-on` value should match the helm installation name you used in the previous step. 
- - ```yaml - name: Test workflow - on: - workflow_dispatch: - - jobs: - test: - runs-on: arc-runner-set - steps: - - name: Hello world - run: echo "Hello world" - ``` - -1. Run the workflow. You should see the runner pod being created and the workflow being executed. - - ```bash - $ kubectl get pods -A - - NAMESPACE NAME READY STATUS RESTARTS AGE - arc-systems arc-actions-runner-controller-2-8c74b6f95-gr7zr 1/1 Running 0 27m - arc-systems arc-runner-set-6cd58d58-listener 1/1 Running 0 7m52s - arc-runners arc-runner-set-rmrgw-runner-p9p5n 1/1 Running 0 21s - ``` - -## Troubleshooting - -### Check the logs - -You can check the logs of the controller pod using the following command: - -```bash -# Controller logs -$ kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=actions-runner-controller-2 - -# Runner set listener logs -kubectl logs -n "${NAMESPACE}" -l runner-scale-set-listener=arc-systems-arc-runner-set -``` - -### If you installed the autoscaling runner set, but the listener pod is not created - -Verify that the secret you provided is correct and that the `githubConfigUrl` you provided is accurate. - -## Changelog - -### v0.2.0 - -#### Major changes - -1. Added proxy support for the controller and the runner pods, see the new helm chart fields [#2286](https://github.com/actions/actions-runner-controller/pull/2286) -1. Added the abiilty to provide a pre-defined kubernetes secret for the auto scaling runner set helm chart [#2234](https://github.com/actions/actions-runner-controller/pull/2234) -1. Enhanced security posture by removing un-required permissions for the manager-role [#2260](https://github.com/actions/actions-runner-controller/pull/2260) -1. Enhanced our logging by returning an error when a runner group is defined in the values file but it's not created in GitHub [#2215](https://github.com/actions/actions-runner-controller/pull/2215) -1. 
Fixed helm charts issues that were preventing the use of DinD [#2291](https://github.com/actions/actions-runner-controller/pull/2291) -1. Fixed a bug that was preventing runner scale from being removed from the backend when they were deleted from the cluster [#2255](https://github.com/actions/actions-runner-controller/pull/2255) [#2223](https://github.com/actions/actions-runner-controller/pull/2223) -1. Fixed bugs with the helm chart definitions preventing certain values from being set [#2222](https://github.com/actions/actions-runner-controller/pull/2222) -1. Fixed a bug that prevented the configuration of a runner group for a runner scale set [#2216](https://github.com/actions/actions-runner-controller/pull/2216) - -#### Log - -- [1c7b7f4](https://github.com/actions/actions-runner-controller/commit/1c7b7f4) Bump arc-2 chart version and prepare 0.2.0 release [#2313](https://github.com/actions/actions-runner-controller/pull/2313) -- [73e22a1](https://github.com/actions/actions-runner-controller/commit/73e22a1) Disable metrics serving in proxy tests [#2307](https://github.com/actions/actions-runner-controller/pull/2307) -- [9b44f00](https://github.com/actions/actions-runner-controller/commit/9b44f00) Documentation corrections [#2116](https://github.com/actions/actions-runner-controller/pull/2116) -- [6b4250c](https://github.com/actions/actions-runner-controller/commit/6b4250c) Add support for proxy [#2286](https://github.com/actions/actions-runner-controller/pull/2286) -- [ced8822](https://github.com/actions/actions-runner-controller/commit/ced8822) Resolves the erroneous webhook scale down due to check runs [#2119](https://github.com/actions/actions-runner-controller/pull/2119) -- [44c06c2](https://github.com/actions/actions-runner-controller/commit/44c06c2) fix: case-insensitive webhook label matching [#2302](https://github.com/actions/actions-runner-controller/pull/2302) -- [4103fe3](https://github.com/actions/actions-runner-controller/commit/4103fe3) Use 
DOCKER_IMAGE_NAME instead of NAME to avoid conflict. [#2303](https://github.com/actions/actions-runner-controller/pull/2303) -- [a44fe04](https://github.com/actions/actions-runner-controller/commit/a44fe04) Fix manager crashloopback for ARC deployments without scaleset-related controllers [#2293](https://github.com/actions/actions-runner-controller/pull/2293) -- [274d0c8](https://github.com/actions/actions-runner-controller/commit/274d0c8) Added ability to configure log level from chart values [#2252](https://github.com/actions/actions-runner-controller/pull/2252) -- [256e08e](https://github.com/actions/actions-runner-controller/commit/256e08e) Ask runner to wait for docker daemon from DinD. [#2292](https://github.com/actions/actions-runner-controller/pull/2292) -- [f677fd5](https://github.com/actions/actions-runner-controller/commit/f677fd5) doc: Fix chart name for helm commands in docs [#2287](https://github.com/actions/actions-runner-controller/pull/2287) -- [d962714](https://github.com/actions/actions-runner-controller/commit/d962714) Fix helm chart when containerMode.type=dind. 
[#2291](https://github.com/actions/actions-runner-controller/pull/2291) -- [3886f28](https://github.com/actions/actions-runner-controller/commit/3886f28) Add EKS test environment Terraform templates [#2290](https://github.com/actions/actions-runner-controller/pull/2290) -- [dab9004](https://github.com/actions/actions-runner-controller/commit/dab9004) Added workflow to be triggered via rest api dispatch in e2e test [#2283](https://github.com/actions/actions-runner-controller/pull/2283) -- [dd8ec1a](https://github.com/actions/actions-runner-controller/commit/dd8ec1a) Add testserver package [#2281](https://github.com/actions/actions-runner-controller/pull/2281) -- [8e52a6d](https://github.com/actions/actions-runner-controller/commit/8e52a6d) EphemeralRunner: On cleanup, if pod is pending, delete from service [#2255](https://github.com/actions/actions-runner-controller/pull/2255) -- [9990243](https://github.com/actions/actions-runner-controller/commit/9990243) Early return if finalizer does not exist to make it more readable [#2262](https://github.com/actions/actions-runner-controller/pull/2262) -- [0891981](https://github.com/actions/actions-runner-controller/commit/0891981) Port ADRs from internal repo [#2267](https://github.com/actions/actions-runner-controller/pull/2267) -- [facae69](https://github.com/actions/actions-runner-controller/commit/facae69) Remove un-required permissions for the manager-role of the new `AutoScalingRunnerSet` [#2260](https://github.com/actions/actions-runner-controller/pull/2260) -- [8f62e35](https://github.com/actions/actions-runner-controller/commit/8f62e35) Add options to multi client [#2257](https://github.com/actions/actions-runner-controller/pull/2257) -- [55951c2](https://github.com/actions/actions-runner-controller/commit/55951c2) Add new workflow to automate runner updates [#2247](https://github.com/actions/actions-runner-controller/pull/2247) -- [c4297d2](https://github.com/actions/actions-runner-controller/commit/c4297d2) Avoid 
deleting scale set if annotation is not parsable or if it does not exist [#2239](https://github.com/actions/actions-runner-controller/pull/2239) -- [0774f06](https://github.com/actions/actions-runner-controller/commit/0774f06) ADR: automate runner updates [#2244](https://github.com/actions/actions-runner-controller/pull/2244) -- [92ab11b](https://github.com/actions/actions-runner-controller/commit/92ab11b) Use UUID v5 for client identifiers [#2241](https://github.com/actions/actions-runner-controller/pull/2241) -- [7414dc6](https://github.com/actions/actions-runner-controller/commit/7414dc6) Add Identifier to actions.Client [#2237](https://github.com/actions/actions-runner-controller/pull/2237) -- [34efb9d](https://github.com/actions/actions-runner-controller/commit/34efb9d) Add documentation to update ARC with prometheus CRDs needed by actions metrics server [#2209](https://github.com/actions/actions-runner-controller/pull/2209) -- [fbad561](https://github.com/actions/actions-runner-controller/commit/fbad561) Allow provide pre-defined kubernetes secret when helm-install AutoScalingRunnerSet [#2234](https://github.com/actions/actions-runner-controller/pull/2234) -- [a5cef7e](https://github.com/actions/actions-runner-controller/commit/a5cef7e) Resolve CI break due to bad merge. [#2236](https://github.com/actions/actions-runner-controller/pull/2236) -- [1f4fe46](https://github.com/actions/actions-runner-controller/commit/1f4fe46) Delete RunnerScaleSet on service when AutoScalingRunnerSet is deleted. 
[#2223](https://github.com/actions/actions-runner-controller/pull/2223) -- [067686c](https://github.com/actions/actions-runner-controller/commit/067686c) Fix typos and markdown structure in troubleshooting guide [#2148](https://github.com/actions/actions-runner-controller/pull/2148) -- [df12e00](https://github.com/actions/actions-runner-controller/commit/df12e00) Remove network requests from actions.NewClient [#2219](https://github.com/actions/actions-runner-controller/pull/2219) -- [cc26593](https://github.com/actions/actions-runner-controller/commit/cc26593) Skip CT when list-changed=false. [#2228](https://github.com/actions/actions-runner-controller/pull/2228) -- [835eac7](https://github.com/actions/actions-runner-controller/commit/835eac7) Fix helm charts when pass values file. [#2222](https://github.com/actions/actions-runner-controller/pull/2222) -- [01e9dd3](https://github.com/actions/actions-runner-controller/commit/01e9dd3) Update Validate ARC workflow to go 1.19 [#2220](https://github.com/actions/actions-runner-controller/pull/2220) -- [8038181](https://github.com/actions/actions-runner-controller/commit/8038181) Allow update runner group for AutoScalingRunnerSet [#2216](https://github.com/actions/actions-runner-controller/pull/2216) -- [219ba5b](https://github.com/actions/actions-runner-controller/commit/219ba5b) chore(deps): bump sigs.k8s.io/controller-runtime from 0.13.1 to 0.14.1 [#2132](https://github.com/actions/actions-runner-controller/pull/2132) -- [b09e3a2](https://github.com/actions/actions-runner-controller/commit/b09e3a2) Return error for non-existing runner group. 
[#2215](https://github.com/actions/actions-runner-controller/pull/2215) -- [7ea60e4](https://github.com/actions/actions-runner-controller/commit/7ea60e4) Fix intermittent image push failures to GHCR [#2214](https://github.com/actions/actions-runner-controller/pull/2214) -- [c8918f5](https://github.com/actions/actions-runner-controller/commit/c8918f5) Fix URL for authenticating using a GitHub app [#2206](https://github.com/actions/actions-runner-controller/pull/2206) -- [d57d17f](https://github.com/actions/actions-runner-controller/commit/d57d17f) Add support for custom CA in actions.Client [#2199](https://github.com/actions/actions-runner-controller/pull/2199) -- [6e69c75](https://github.com/actions/actions-runner-controller/commit/6e69c75) chore(deps): bump github.com/hashicorp/go-retryablehttp from 0.7.1 to 0.7.2 [#2203](https://github.com/actions/actions-runner-controller/pull/2203) -- [882bfab](https://github.com/actions/actions-runner-controller/commit/882bfab) Renaming autoScaling to autoscaling in tests matching the convention [#2201](https://github.com/actions/actions-runner-controller/pull/2201) -- [3327f62](https://github.com/actions/actions-runner-controller/commit/3327f62) Refactor actions.Client with options to help extensibility [#2193](https://github.com/actions/actions-runner-controller/pull/2193) -- [282f2dd](https://github.com/actions/actions-runner-controller/commit/282f2dd) chore(deps): bump github.com/onsi/gomega from 1.20.2 to 1.25.0 [#2169](https://github.com/actions/actions-runner-controller/pull/2169) -- [d67f808](https://github.com/actions/actions-runner-controller/commit/d67f808) Include nikola-jokic in CODEOWNERS file [#2184](https://github.com/actions/actions-runner-controller/pull/2184) -- [4932412](https://github.com/actions/actions-runner-controller/commit/4932412) Fix L0 test to make it more reliable. 
[#2178](https://github.com/actions/actions-runner-controller/pull/2178) -- [6da1cde](https://github.com/actions/actions-runner-controller/commit/6da1cde) Update runner version to 2.301.1 [#2182](https://github.com/actions/actions-runner-controller/pull/2182) -- [f9bae70](https://github.com/actions/actions-runner-controller/commit/f9bae70) Add distinct namespace best practice note [#2181](https://github.com/actions/actions-runner-controller/pull/2181) -- [05a3908](https://github.com/actions/actions-runner-controller/commit/05a3908) Add arc-2 quickstart guide [#2180](https://github.com/actions/actions-runner-controller/pull/2180) -- [606ed1b](https://github.com/actions/actions-runner-controller/commit/606ed1b) Add Repository information to Runner Status [#2093](https://github.com/actions/actions-runner-controller/pull/2093) +301 - MOVED TO [../gha-runner-scale-set-controller/README.md](../gha-runner-scale-set-controller/README.md) \ No newline at end of file diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md new file mode 100644 index 0000000000..791d33ccea --- /dev/null +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -0,0 +1,211 @@ +# Autoscaling Runner Scale Sets mode + +**⚠️ This mode is currently only available for a limited number of organizations.** + +This new autoscaling mode brings numerous enhancements (described in the following sections) that will make your experience more reliable and secure. 
+ +## How it works + +![arc_hld_v1 drawio (1)](https://user-images.githubusercontent.com/568794/212665433-2d1f3d6e-0ba8-4f02-9d1b-27d00c49abd1.png) + +In addition to the increased reliability of the automatic scaling, we have worked on these improvements: + +- No longer require cert-manager as a prerequisite for installing actions-runner-controller +- Reliable scale-up based on job demands and scale-down to zero runner pods +- Reduce API requests to `api.github.com`, no more API rate-limiting problems +- The GitHub Personal Access Token (PAT) or the GitHub App installation token is no longer passed to the runner pod for runner registration +- Maximum flexibility for customizing your runner pod template + +### Demo + +https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a73e-27f5e8c75720.mp4 + +## Setup + +### Prerequisites + +1. Create a K8s cluster, if not available. + - If you don't have a K8s cluster, you can install a local environment using minikube. See [installing minikube](https://minikube.sigs.k8s.io/docs/start/). +1. Install helm 3, if not available. See [installing Helm](https://helm.sh/docs/intro/install/). + +### Install actions-runner-controller + +1. Install actions-runner-controller using helm 3. For additional configuration options, see [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/gha-runner-scale-set-controller/values.yaml) + + ```bash + NAMESPACE="arc-systems" + helm install arc \ + --namespace "${NAMESPACE}" \ + --create-namespace \ + oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \ + --version 0.2.0 + ``` + +1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app). 
+ - ℹ For the list of required permissions, see [Authenticating to the GitHub API](https://github.com/actions/actions-runner-controller/blob/master/docs/authenticating-to-the-github-api.md#authenticating-to-the-github-api). + +1. You're ready to install the autoscaling runner set. For additional configuration options, see [values.yaml](https://github.com/actions/actions-runner-controller/blob/master/charts/gha-runner-scale-set/values.yaml) + - ℹ **Choose your installation name carefully**, you will use it as the value of `runs-on` in your workflow. + - ℹ **We recommend you choose a unique namespace in the following steps**. As a good security measure, it's best to have your runner pods created in a different namespace than the one containing the manager and listener pods. + + ```bash + # Using a Personal Access Token (PAT) + INSTALLATION_NAME="arc-runner-set" + NAMESPACE="arc-runners" + GITHUB_CONFIG_URL="https://github.com/" + GITHUB_PAT="" + helm install "${INSTALLATION_NAME}" \ + --namespace "${NAMESPACE}" \ + --create-namespace \ + --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ + --set githubConfigSecret.github_token="${GITHUB_PAT}" \ + oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.2.0 + ``` + + ```bash + # Using a GitHub App + INSTALLATION_NAME="arc-runner-set" + NAMESPACE="arc-runners" + GITHUB_CONFIG_URL="https://github.com/" + GITHUB_APP_ID="" + GITHUB_APP_INSTALLATION_ID="" + GITHUB_APP_PRIVATE_KEY="" + helm install arc-runner-set \ + --namespace "${NAMESPACE}" \ + --create-namespace \ + --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ + --set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \ + --set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \ + --set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \ + oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.2.0 + ``` + +1. Check your installation. 
If everything went well, you should see the following: + + ```bash + $ helm list -n "${NAMESPACE}" + + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed gha-runner-scale-set-controller-0.2.0 preview + arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed gha-runner-scale-set-0.2.0 0.2.0 + ``` + + ```bash + $ kubectl get pods -n "${NAMESPACE}" + + NAME READY STATUS RESTARTS AGE + arc-gha-runner-scale-set-controller-8c74b6f95-gr7zr 1/1 Running 0 20m + arc-runner-set-6cd58d58-listener 1/1 Running 0 21s + ``` + +1. In a repository, create a simple test workflow as follows. The `runs-on` value should match the helm installation name you used in the previous step. + + ```yaml + name: Test workflow + on: + workflow_dispatch: + + jobs: + test: + runs-on: arc-runner-set + steps: + - name: Hello world + run: echo "Hello world" + ``` + +1. Run the workflow. You should see the runner pod being created and the workflow being executed. + + ```bash + $ kubectl get pods -A + + NAMESPACE NAME READY STATUS RESTARTS AGE + arc-systems arc-gha-runner-scale-set-controller-8c74b6f95-gr7zr 1/1 Running 0 27m + arc-systems arc-runner-set-6cd58d58-listener 1/1 Running 0 7m52s + arc-runners arc-runner-set-rmrgw-runner-p9p5n 1/1 Running 0 21s + ``` + +## Troubleshooting + +### Check the logs + +You can check the logs of the controller pod using the following command: + +```bash +# Controller logs +$ kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-runner-scale-set-controller + +# Runner set listener logs +kubectl logs -n "${NAMESPACE}" -l runner-scale-set-listener=arc-systems-arc-runner-set +``` + +### If you installed the autoscaling runner set, but the listener pod is not created + +Verify that the secret you provided is correct and that the `githubConfigUrl` you provided is accurate. + +## Changelog + +### v0.2.0 + +#### Major changes + +1. 
Added proxy support for the controller and the runner pods, see the new helm chart fields [#2286](https://github.com/actions/actions-runner-controller/pull/2286) +1. Added the ability to provide a pre-defined kubernetes secret for the auto scaling runner set helm chart [#2234](https://github.com/actions/actions-runner-controller/pull/2234) +1. Enhanced security posture by removing un-required permissions for the manager-role [#2260](https://github.com/actions/actions-runner-controller/pull/2260) +1. Enhanced our logging by returning an error when a runner group is defined in the values file but it's not created in GitHub [#2215](https://github.com/actions/actions-runner-controller/pull/2215) +1. Fixed helm charts issues that were preventing the use of DinD [#2291](https://github.com/actions/actions-runner-controller/pull/2291) +1. Fixed a bug that was preventing runner scale from being removed from the backend when they were deleted from the cluster [#2255](https://github.com/actions/actions-runner-controller/pull/2255) [#2223](https://github.com/actions/actions-runner-controller/pull/2223) +1. Fixed bugs with the helm chart definitions preventing certain values from being set [#2222](https://github.com/actions/actions-runner-controller/pull/2222) +1. 
Fixed a bug that prevented the configuration of a runner group for a runner scale set [#2216](https://github.com/actions/actions-runner-controller/pull/2216) + +#### Log + +- [1c7b7f4](https://github.com/actions/actions-runner-controller/commit/1c7b7f4) Bump arc-2 chart version and prepare 0.2.0 release [#2313](https://github.com/actions/actions-runner-controller/pull/2313) +- [73e22a1](https://github.com/actions/actions-runner-controller/commit/73e22a1) Disable metrics serving in proxy tests [#2307](https://github.com/actions/actions-runner-controller/pull/2307) +- [9b44f00](https://github.com/actions/actions-runner-controller/commit/9b44f00) Documentation corrections [#2116](https://github.com/actions/actions-runner-controller/pull/2116) +- [6b4250c](https://github.com/actions/actions-runner-controller/commit/6b4250c) Add support for proxy [#2286](https://github.com/actions/actions-runner-controller/pull/2286) +- [ced8822](https://github.com/actions/actions-runner-controller/commit/ced8822) Resolves the erroneous webhook scale down due to check runs [#2119](https://github.com/actions/actions-runner-controller/pull/2119) +- [44c06c2](https://github.com/actions/actions-runner-controller/commit/44c06c2) fix: case-insensitive webhook label matching [#2302](https://github.com/actions/actions-runner-controller/pull/2302) +- [4103fe3](https://github.com/actions/actions-runner-controller/commit/4103fe3) Use DOCKER_IMAGE_NAME instead of NAME to avoid conflict. 
[#2303](https://github.com/actions/actions-runner-controller/pull/2303) +- [a44fe04](https://github.com/actions/actions-runner-controller/commit/a44fe04) Fix manager crashloopback for ARC deployments without scaleset-related controllers [#2293](https://github.com/actions/actions-runner-controller/pull/2293) +- [274d0c8](https://github.com/actions/actions-runner-controller/commit/274d0c8) Added ability to configure log level from chart values [#2252](https://github.com/actions/actions-runner-controller/pull/2252) +- [256e08e](https://github.com/actions/actions-runner-controller/commit/256e08e) Ask runner to wait for docker daemon from DinD. [#2292](https://github.com/actions/actions-runner-controller/pull/2292) +- [f677fd5](https://github.com/actions/actions-runner-controller/commit/f677fd5) doc: Fix chart name for helm commands in docs [#2287](https://github.com/actions/actions-runner-controller/pull/2287) +- [d962714](https://github.com/actions/actions-runner-controller/commit/d962714) Fix helm chart when containerMode.type=dind. 
[#2291](https://github.com/actions/actions-runner-controller/pull/2291) +- [3886f28](https://github.com/actions/actions-runner-controller/commit/3886f28) Add EKS test environment Terraform templates [#2290](https://github.com/actions/actions-runner-controller/pull/2290) +- [dab9004](https://github.com/actions/actions-runner-controller/commit/dab9004) Added workflow to be triggered via rest api dispatch in e2e test [#2283](https://github.com/actions/actions-runner-controller/pull/2283) +- [dd8ec1a](https://github.com/actions/actions-runner-controller/commit/dd8ec1a) Add testserver package [#2281](https://github.com/actions/actions-runner-controller/pull/2281) +- [8e52a6d](https://github.com/actions/actions-runner-controller/commit/8e52a6d) EphemeralRunner: On cleanup, if pod is pending, delete from service [#2255](https://github.com/actions/actions-runner-controller/pull/2255) +- [9990243](https://github.com/actions/actions-runner-controller/commit/9990243) Early return if finalizer does not exist to make it more readable [#2262](https://github.com/actions/actions-runner-controller/pull/2262) +- [0891981](https://github.com/actions/actions-runner-controller/commit/0891981) Port ADRs from internal repo [#2267](https://github.com/actions/actions-runner-controller/pull/2267) +- [facae69](https://github.com/actions/actions-runner-controller/commit/facae69) Remove un-required permissions for the manager-role of the new `AutoScalingRunnerSet` [#2260](https://github.com/actions/actions-runner-controller/pull/2260) +- [8f62e35](https://github.com/actions/actions-runner-controller/commit/8f62e35) Add options to multi client [#2257](https://github.com/actions/actions-runner-controller/pull/2257) +- [55951c2](https://github.com/actions/actions-runner-controller/commit/55951c2) Add new workflow to automate runner updates [#2247](https://github.com/actions/actions-runner-controller/pull/2247) +- [c4297d2](https://github.com/actions/actions-runner-controller/commit/c4297d2) Avoid 
deleting scale set if annotation is not parsable or if it does not exist [#2239](https://github.com/actions/actions-runner-controller/pull/2239) +- [0774f06](https://github.com/actions/actions-runner-controller/commit/0774f06) ADR: automate runner updates [#2244](https://github.com/actions/actions-runner-controller/pull/2244) +- [92ab11b](https://github.com/actions/actions-runner-controller/commit/92ab11b) Use UUID v5 for client identifiers [#2241](https://github.com/actions/actions-runner-controller/pull/2241) +- [7414dc6](https://github.com/actions/actions-runner-controller/commit/7414dc6) Add Identifier to actions.Client [#2237](https://github.com/actions/actions-runner-controller/pull/2237) +- [34efb9d](https://github.com/actions/actions-runner-controller/commit/34efb9d) Add documentation to update ARC with prometheus CRDs needed by actions metrics server [#2209](https://github.com/actions/actions-runner-controller/pull/2209) +- [fbad561](https://github.com/actions/actions-runner-controller/commit/fbad561) Allow provide pre-defined kubernetes secret when helm-install AutoScalingRunnerSet [#2234](https://github.com/actions/actions-runner-controller/pull/2234) +- [a5cef7e](https://github.com/actions/actions-runner-controller/commit/a5cef7e) Resolve CI break due to bad merge. [#2236](https://github.com/actions/actions-runner-controller/pull/2236) +- [1f4fe46](https://github.com/actions/actions-runner-controller/commit/1f4fe46) Delete RunnerScaleSet on service when AutoScalingRunnerSet is deleted. 
[#2223](https://github.com/actions/actions-runner-controller/pull/2223) +- [067686c](https://github.com/actions/actions-runner-controller/commit/067686c) Fix typos and markdown structure in troubleshooting guide [#2148](https://github.com/actions/actions-runner-controller/pull/2148) +- [df12e00](https://github.com/actions/actions-runner-controller/commit/df12e00) Remove network requests from actions.NewClient [#2219](https://github.com/actions/actions-runner-controller/pull/2219) +- [cc26593](https://github.com/actions/actions-runner-controller/commit/cc26593) Skip CT when list-changed=false. [#2228](https://github.com/actions/actions-runner-controller/pull/2228) +- [835eac7](https://github.com/actions/actions-runner-controller/commit/835eac7) Fix helm charts when pass values file. [#2222](https://github.com/actions/actions-runner-controller/pull/2222) +- [01e9dd3](https://github.com/actions/actions-runner-controller/commit/01e9dd3) Update Validate ARC workflow to go 1.19 [#2220](https://github.com/actions/actions-runner-controller/pull/2220) +- [8038181](https://github.com/actions/actions-runner-controller/commit/8038181) Allow update runner group for AutoScalingRunnerSet [#2216](https://github.com/actions/actions-runner-controller/pull/2216) +- [219ba5b](https://github.com/actions/actions-runner-controller/commit/219ba5b) chore(deps): bump sigs.k8s.io/controller-runtime from 0.13.1 to 0.14.1 [#2132](https://github.com/actions/actions-runner-controller/pull/2132) +- [b09e3a2](https://github.com/actions/actions-runner-controller/commit/b09e3a2) Return error for non-existing runner group. 
[#2215](https://github.com/actions/actions-runner-controller/pull/2215) +- [7ea60e4](https://github.com/actions/actions-runner-controller/commit/7ea60e4) Fix intermittent image push failures to GHCR [#2214](https://github.com/actions/actions-runner-controller/pull/2214) +- [c8918f5](https://github.com/actions/actions-runner-controller/commit/c8918f5) Fix URL for authenticating using a GitHub app [#2206](https://github.com/actions/actions-runner-controller/pull/2206) +- [d57d17f](https://github.com/actions/actions-runner-controller/commit/d57d17f) Add support for custom CA in actions.Client [#2199](https://github.com/actions/actions-runner-controller/pull/2199) +- [6e69c75](https://github.com/actions/actions-runner-controller/commit/6e69c75) chore(deps): bump github.com/hashicorp/go-retryablehttp from 0.7.1 to 0.7.2 [#2203](https://github.com/actions/actions-runner-controller/pull/2203) +- [882bfab](https://github.com/actions/actions-runner-controller/commit/882bfab) Renaming autoScaling to autoscaling in tests matching the convention [#2201](https://github.com/actions/actions-runner-controller/pull/2201) +- [3327f62](https://github.com/actions/actions-runner-controller/commit/3327f62) Refactor actions.Client with options to help extensibility [#2193](https://github.com/actions/actions-runner-controller/pull/2193) +- [282f2dd](https://github.com/actions/actions-runner-controller/commit/282f2dd) chore(deps): bump github.com/onsi/gomega from 1.20.2 to 1.25.0 [#2169](https://github.com/actions/actions-runner-controller/pull/2169) +- [d67f808](https://github.com/actions/actions-runner-controller/commit/d67f808) Include nikola-jokic in CODEOWNERS file [#2184](https://github.com/actions/actions-runner-controller/pull/2184) +- [4932412](https://github.com/actions/actions-runner-controller/commit/4932412) Fix L0 test to make it more reliable. 
[#2178](https://github.com/actions/actions-runner-controller/pull/2178) +- [6da1cde](https://github.com/actions/actions-runner-controller/commit/6da1cde) Update runner version to 2.301.1 [#2182](https://github.com/actions/actions-runner-controller/pull/2182) +- [f9bae70](https://github.com/actions/actions-runner-controller/commit/f9bae70) Add distinct namespace best practice note [#2181](https://github.com/actions/actions-runner-controller/pull/2181) +- [05a3908](https://github.com/actions/actions-runner-controller/commit/05a3908) Add arc-2 quickstart guide [#2180](https://github.com/actions/actions-runner-controller/pull/2180) +- [606ed1b](https://github.com/actions/actions-runner-controller/commit/606ed1b) Add Repository information to Runner Status [#2093](https://github.com/actions/actions-runner-controller/pull/2093) diff --git a/test/platforms/aws-eks/README.md b/test/platforms/aws-eks/README.md index 354d1df8a6..a1165dd160 100644 --- a/test/platforms/aws-eks/README.md +++ b/test/platforms/aws-eks/README.md @@ -64,7 +64,7 @@ aws eks --region "${AWS_REGION}" update-kubeconfig \ kubectl cluster-info ``` -Setup ARC by following [this quick-start guide](https://github.com/actions/actions-runner-controller/tree/master/docs/preview/actions-runner-controller-2). +Setup ARC by following [this quick-start guide](https://github.com/actions/actions-runner-controller/tree/master/docs/preview/gha-runner-scale-set-controller). 
### Troubleshooting From 400fcfa55fe4caec02d87132332ca12068bb0c6a Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Wed, 1 Mar 2023 16:55:08 +0100 Subject: [PATCH 096/561] Update trigger events for validate-chart (#2342) --- .github/workflows/validate-chart.yaml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/validate-chart.yaml b/.github/workflows/validate-chart.yaml index 6eca19f466..61ade607eb 100644 --- a/.github/workflows/validate-chart.yaml +++ b/.github/workflows/validate-chart.yaml @@ -1,6 +1,14 @@ name: Validate Helm Chart on: + pull_request: + branches: + - master + paths: + - 'charts/**' + - '.github/workflows/validate-chart.yaml' + - '!charts/actions-runner-controller/docs/**' + - '!**.md' push: paths: - 'charts/**' @@ -26,7 +34,8 @@ jobs: fetch-depth: 0 - name: Set up Helm - uses: azure/setup-helm@v3.4 + # Using https://github.com/Azure/setup-helm/releases/tag/v3.5 + uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 with: version: ${{ env.HELM_VERSION }} From 01e33bfa90fe5007140b151730be62f9694a635b Mon Sep 17 00:00:00 2001 From: Ava Stancu Date: Wed, 1 Mar 2023 23:10:34 +0100 Subject: [PATCH 097/561] Matrix jobs workflow path update (#2349) --- test_e2e_arc/arc_jobs_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test_e2e_arc/arc_jobs_test.go b/test_e2e_arc/arc_jobs_test.go index 812859bc89..02db92f67c 100644 --- a/test_e2e_arc/arc_jobs_test.go +++ b/test_e2e_arc/arc_jobs_test.go @@ -95,8 +95,8 @@ func TestARCJobs(t *testing.T) { dateTime := os.Getenv("DATE_TIME") // We are triggering manually a workflow that already exists in the repo. // This workflow is expected to spin up a number of runner pods matching the runners value set in podCountsByType. 
- url := "https://api.github.com/repos/actions/actions-runner-controller/actions/workflows/e2e-test-dispatch-workflow.yaml/dispatches" - jsonStr := []byte(fmt.Sprintf(`{"ref":"master", "inputs":{"date_time":"%s"}}`, dateTime)) + url := "https://api.github.com/repos/actions-runner-controller/arc_e2e_test_dummy/actions/workflows/e2e-test-dispatch-workflow.yaml/dispatches" + jsonStr := []byte(fmt.Sprintf(`{"ref":"main", "inputs":{"date_time":"%s"}}`, dateTime)) req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr)) if err != nil { From 4f5370711374af3117f3e0f9f32fd5600d52239d Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Thu, 2 Mar 2023 03:16:40 -0500 Subject: [PATCH 098/561] Make CT test to install charts in the right order. (#2350) --- charts/.ci/ct-config.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/charts/.ci/ct-config.yaml b/charts/.ci/ct-config.yaml index 28cbc0ab26..64c8d1bb2a 100644 --- a/charts/.ci/ct-config.yaml +++ b/charts/.ci/ct-config.yaml @@ -1,6 +1,9 @@ # This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow -all: true lint-conf: charts/.ci/lint-config.yaml chart-repos: - jetstack=https://charts.jetstack.io check-version-increment: false # Disable checking that the chart version has been bumped +charts: +- charts/actions-runner-controller +- charts/gha-runner-scale-set-controller +- charts/gha-runner-scale-set \ No newline at end of file From 8386e05d99d4f7fd38c0deeb90f3089a8c014bb1 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Thu, 2 Mar 2023 10:35:55 +0100 Subject: [PATCH 099/561] Chart naming validation on AutoscalingRunnerSet install (#2347) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Co-authored-by: Bassem Dghaidi --- .../templates/autoscalingrunnerset.yaml | 12 ++++-- .../tests/template_test.go | 42 +++++++++++++++++++ .../gha-runner-scale-set-controller/README.md | 30 +++++++++---- 3 files changed, 73 insertions(+), 11 
deletions(-) diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index d01f8cd966..478fc775ca 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -1,6 +1,12 @@ apiVersion: actions.github.com/v1alpha1 kind: AutoscalingRunnerSet metadata: + {{- if or (not .Release.Name) (gt (len .Release.Name) 45) }} + {{ fail "Name must have up to 45 characters" }} + {{- end }} + {{- if gt (len .Release.Namespace) 63 }} + {{ fail "Namespace must have up to 63 characters" }} + {{- end }} name: {{ .Release.Name }} namespace: {{ .Release.Namespace }} labels: @@ -82,7 +88,7 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} {{- end }} - containers: + containers: {{- if eq .Values.containerMode.type "dind" }} - name: runner {{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }} @@ -97,7 +103,7 @@ spec: {{ .Values.template.spec.containers | toYaml | nindent 6 }} {{- end }} {{- if or .Values.template.spec.volumes (eq .Values.containerMode.type "dind") (eq .Values.containerMode.type "kubernetes") }} - volumes: + volumes: {{- if eq .Values.containerMode.type "dind" }} {{- include "gha-runner-scale-set.dind-volume" . | nindent 6 }} {{- include "gha-runner-scale-set.dind-work-volume" . | nindent 6 }} @@ -105,4 +111,4 @@ spec: {{- include "gha-runner-scale-set.kubernetes-mode-work-volume" . | nindent 6 }} {{- end }} {{- include "gha-runner-scale-set.non-work-volumes" . 
| nindent 6 }} - {{- end }} \ No newline at end of file + {{- end }} diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index a26e571a6c..c9939415f9 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -780,3 +780,45 @@ func TestTemplateRenderedWithProxy(t *testing.T) { assert.Contains(t, ars.Spec.Proxy.NoProxy, "example.com") assert.Contains(t, ars.Spec.Proxy.NoProxy, "example.org") } + +func TestTemplateNamingConstraints(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + setValues := map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "", + } + + tt := map[string]struct { + releaseName string + namespaceName string + expectedError string + }{ + "Name too long": { + releaseName: strings.Repeat("a", 46), + namespaceName: "test-" + strings.ToLower(random.UniqueId()), + expectedError: "Name must have up to 45 characters", + }, + "Namespace too long": { + releaseName: "test-" + strings.ToLower(random.UniqueId()), + namespaceName: strings.Repeat("a", 64), + expectedError: "Namespace must have up to 63 characters", + }, + } + + for name, tc := range tt { + t.Run(name, func(t *testing.T) { + options := &helm.Options{ + SetValues: setValues, + KubectlOptions: k8s.NewKubectlOptions("", "", tc.namespaceName), + } + _, err = helm.RenderTemplateE(t, options, helmChartPath, tc.releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + require.Error(t, err) + assert.ErrorContains(t, err, tc.expectedError) + }) + } +} diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index 791d33ccea..50c98af7fc 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ 
b/docs/preview/gha-runner-scale-set-controller/README.md @@ -50,7 +50,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 ```bash # Using a Personal Access Token (PAT) - INSTALLATION_NAME="arc-runner-set" + INSTALLATION_NAME="arc-runner-set" NAMESPACE="arc-runners" GITHUB_CONFIG_URL="https://github.com/" GITHUB_PAT="" @@ -64,9 +64,9 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 ```bash # Using a GitHub App - INSTALLATION_NAME="arc-runner-set" + INSTALLATION_NAME="arc-runner-set" NAMESPACE="arc-runners" - GITHUB_CONFIG_URL="https://github.com/" + GITHUB_CONFIG_URL="https://github.com/" GITHUB_APP_ID="" GITHUB_APP_INSTALLATION_ID="" GITHUB_APP_PRIVATE_KEY="" @@ -86,8 +86,8 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 $ helm list -n "${NAMESPACE}" NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION - arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed gha-runner-scale-set-controller-0.2.0 preview - arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed gha-runner-scale-set-0.2.0 0.2.0 + arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed gha-runner-scale-set-controller-0.2.0 preview + arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed gha-runner-scale-set-0.2.0 0.2.0 ``` ```bash @@ -118,10 +118,10 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 ```bash $ kubectl get pods -A - NAMESPACE NAME READY STATUS RESTARTS AGE + NAMESPACE NAME READY STATUS RESTARTS AGE arc-systems arc-gha-runner-scale-set-controller-8c74b6f95-gr7zr 1/1 Running 0 27m - arc-systems arc-runner-set-6cd58d58-listener 1/1 Running 0 7m52s - arc-runners arc-runner-set-rmrgw-runner-p9p5n 1/1 Running 0 21s + arc-systems arc-runner-set-6cd58d58-listener 1/1 Running 0 7m52s + arc-runners arc-runner-set-rmrgw-runner-p9p5n 1/1 Running 0 21s ``` ## Troubleshooting @@ -138,6 +138,20 
@@ $ kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-runner-scale-set- kubectl logs -n "${NAMESPACE}" -l runner-scale-set-listener=arc-systems-arc-runner-set ``` +### Naming error: `Name must have up to characters` + +We are using some of the resources generated names as labels for other resources. Resource names have a max length of `253 characters` while labels are limited to `63 characters`. Given this constraint, we have to limit the resource names to `63 characters`. + +Since part of the resource name is defined by you, we have to impose a limit on the amount of characters you can use for the installation and namespace names. + +If you see these errors, you have to use shorter installation or namespace names. + +```bash +Error: INSTALLATION FAILED: execution error at (gha-runner-scale-set/templates/autoscalingrunnerset.yaml:5:5): Name must have up to 45 characters + +Error: INSTALLATION FAILED: execution error at (gha-runner-scale-set/templates/autoscalingrunnerset.yaml:8:5): Namespace must have up to 63 characters +``` + ### If you installed the autoscaling runner set, but the listener pod is not created Verify that the secret you provided is correct and that the `githubConfigUrl` you provided is accurate. 
From 715e763386b28afed8c4a00025043372160d3fec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Mar 2023 10:41:18 +0100 Subject: [PATCH 100/561] bump golang.org/x/net from 0.5.0 to 0.7.0 (#2299) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c977395aee..2e4a792f5c 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/teambition/rrule-go v1.8.0 go.uber.org/multierr v1.7.0 go.uber.org/zap v1.24.0 - golang.org/x/net v0.6.0 + golang.org/x/net v0.7.0 golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 gomodules.xyz/jsonpatch/v2 v2.2.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 56ef6bbc1b..7a42ba8130 100644 --- a/go.sum +++ b/go.sum @@ -452,8 +452,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= From 
5db1b7f1934e96de8bb25a6eeaeb86155645380f Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Thu, 2 Mar 2023 17:25:50 +0100 Subject: [PATCH 101/561] Split listener pod label to avoid long names issue (#2341) --- .../autoscalingrunnerset_controller.go | 5 --- .../actions.github.com/resourcebuilder.go | 35 +++++++++++-------- .../gha-runner-scale-set-controller/README.md | 2 +- 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 99fa0cb567..1c5c32eb0c 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -44,14 +44,9 @@ const ( // TODO: Replace with shared image. autoscalingRunnerSetOwnerKey = ".metadata.controller" LabelKeyRunnerSpecHash = "runner-spec-hash" - LabelKeyAutoScaleRunnerSetName = "auto-scale-runner-set-name" autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" runnerScaleSetIdKey = "runner-scale-set-id" runnerScaleSetRunnerGroupNameKey = "runner-scale-set-runner-group-name" - - // scaleSetListenerLabel is the key of pod.meta.labels to label - // that the pod is a listener application - scaleSetListenerLabel = "runner-scale-set-listener" ) // AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index a7c58caea5..b8f7512d6d 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -14,16 +14,20 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// secret constants const ( jitTokenKey = "jitToken" ) +// labels applied to resources +const ( + LabelKeyAutoScaleRunnerSetName = "auto-scaling-runner-set-name" + LabelKeyAutoScaleRunnerSetNamespace = 
"auto-scaling-runner-set-namespace" +) + type resourceBuilder struct{} func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod { - newLabels := map[string]string{} - newLabels[scaleSetListenerLabel] = fmt.Sprintf("%v-%v", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, autoscalingListener.Spec.AutoscalingRunnerSetName) - listenerEnv := []corev1.EnvVar{ { Name: "GITHUB_CONFIGURE_URL", @@ -133,7 +137,10 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A ObjectMeta: metav1.ObjectMeta{ Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace, - Labels: newLabels, + Labels: map[string]string{ + LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, + }, }, Spec: podSpec, } @@ -180,8 +187,8 @@ func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace, Labels: map[string]string{ - "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - "auto-scaling-runner-set-name": autoscalingListener.Spec.AutoscalingRunnerSetName, + LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, }, }, } @@ -195,8 +202,8 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1. 
Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Labels: map[string]string{ - "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - "auto-scaling-runner-set-name": autoscalingListener.Spec.AutoscalingRunnerSetName, + LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, "auto-scaling-listener-namespace": autoscalingListener.Namespace, "auto-scaling-listener-name": autoscalingListener.Name, "role-policy-rules-hash": rulesHash, @@ -229,8 +236,8 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1 Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Labels: map[string]string{ - "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - "auto-scaling-runner-set-name": autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, "auto-scaling-listener-namespace": autoscalingListener.Namespace, "auto-scaling-listener-name": autoscalingListener.Name, "role-binding-role-ref-hash": roleRefHash, @@ -252,8 +259,8 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v Name: scaleSetListenerSecretMirrorName(autoscalingListener), Namespace: autoscalingListener.Namespace, Labels: map[string]string{ - "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - "auto-scaling-runner-set-name": autoscalingListener.Spec.AutoscalingRunnerSetName, + LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyAutoScaleRunnerSetName: 
autoscalingListener.Spec.AutoscalingRunnerSetName, "secret-data-hash": dataHash, }, }, @@ -283,8 +290,8 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1. Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: namespace, Labels: map[string]string{ - "auto-scaling-runner-set-namespace": autoscalingRunnerSet.Namespace, - "auto-scaling-runner-set-name": autoscalingRunnerSet.Name, + LabelKeyAutoScaleRunnerSetNamespace: autoscalingRunnerSet.Namespace, + LabelKeyAutoScaleRunnerSetName: autoscalingRunnerSet.Name, LabelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), }, }, diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index 50c98af7fc..0e7d8d8aab 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -135,7 +135,7 @@ You can check the logs of the controller pod using the following command: $ kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-runner-scale-set-controller # Runner set listener logs -kubectl logs -n "${NAMESPACE}" -l runner-scale-set-listener=arc-systems-arc-runner-set +kubectl logs -n "${NAMESPACE}" -l auto-scaling-runner-set-namespace=arc-systems -l auto-scaling-runner-set-name=arc-runner-set ``` ### Naming error: `Name must have up to characters` From 151c44d1830743391babc4e5b0eb2d39b749847f Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Thu, 2 Mar 2023 18:55:49 +0000 Subject: [PATCH 102/561] Simplify the setup of controller tests (#2352) --- .../autoscalinglistener_controller_test.go | 110 ++--------- .../autoscalingrunnerset_controller_test.go | 174 +++-------------- .../ephemeralrunner_controller_test.go | 179 +++--------------- .../ephemeralrunnerset_controller_test.go | 106 ++--------- .../actions.github.com/helpers_test.go | 71 +++++++ go.mod | 1 + go.sum | 2 + 7 files changed, 171 insertions(+), 472 deletions(-) create mode 100644 
controllers/actions.github.com/helpers_test.go diff --git a/controllers/actions.github.com/autoscalinglistener_controller_test.go b/controllers/actions.github.com/autoscalinglistener_controller_test.go index d5cf3280f4..487d9eb602 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller_test.go +++ b/controllers/actions.github.com/autoscalinglistener_controller_test.go @@ -28,46 +28,23 @@ const ( var _ = Describe("Test AutoScalingListener controller", func() { var ctx context.Context - var cancel context.CancelFunc - autoscalingNS := new(corev1.Namespace) - autoscalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet) - configSecret := new(corev1.Secret) - autoscalingListener := new(actionsv1alpha1.AutoscalingListener) + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet + var configSecret *corev1.Secret + var autoscalingListener *actionsv1alpha1.AutoscalingListener BeforeEach(func() { - ctx, cancel = context.WithCancel(context.TODO()) - autoscalingNS = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-listener" + RandStringRunes(5)}, - } - - err := k8sClient.Create(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") - - configSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "github-config-secret", - Namespace: autoscalingNS.Name, - }, - Data: map[string][]byte{ - "github_token": []byte(autoscalingListenerTestGitHubToken), - }, - } - - err = k8sClient.Create(ctx, configSecret) - Expect(err).NotTo(HaveOccurred(), "failed to create config secret") - - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoscalingNS.Name, - MetricsBindAddress: "0", - }) - Expect(err).NotTo(HaveOccurred(), "failed to create manager") + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), 
k8sClient, autoscalingNS.Name) controller := &AutoscalingListenerReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Log: logf.Log, } - err = controller.SetupWithManager(mgr) + err := controller.SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred(), "failed to setup controller") min := 1 @@ -119,19 +96,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { err = k8sClient.Create(ctx, autoscalingListener) Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingListener") - go func() { - defer GinkgoRecover() - - err := mgr.Start(ctx) - Expect(err).NotTo(HaveOccurred(), "failed to start manager") - }() - }) - - AfterEach(func() { - defer cancel() - - err := k8sClient.Delete(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") + startManagers(GinkgoT(), mgr) }) Context("When creating a new AutoScalingListener", func() { @@ -396,11 +361,11 @@ var _ = Describe("Test AutoScalingListener controller", func() { var _ = Describe("Test AutoScalingListener controller with proxy", func() { var ctx context.Context - var cancel context.CancelFunc - autoscalingNS := new(corev1.Namespace) - autoscalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet) - configSecret := new(corev1.Secret) - autoscalingListener := new(actionsv1alpha1.AutoscalingListener) + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet + var configSecret *corev1.Secret + var autoscalingListener *actionsv1alpha1.AutoscalingListener createRunnerSetAndListener := func(proxy *actionsv1alpha1.ProxyConfig) { min := 1 @@ -456,54 +421,19 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() { } BeforeEach(func() { - ctx, cancel = context.WithCancel(context.TODO()) - autoscalingNS = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-listener" + RandStringRunes(5)}, - } - - err := k8sClient.Create(ctx, 
autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") - - configSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "github-config-secret", - Namespace: autoscalingNS.Name, - }, - Data: map[string][]byte{ - "github_token": []byte(autoscalingListenerTestGitHubToken), - }, - } - - err = k8sClient.Create(ctx, configSecret) - Expect(err).NotTo(HaveOccurred(), "failed to create config secret") - - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoscalingNS.Name, - MetricsBindAddress: "0", - }) - Expect(err).NotTo(HaveOccurred(), "failed to create manager") + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) controller := &AutoscalingListenerReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Log: logf.Log, } - err = controller.SetupWithManager(mgr) + err := controller.SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred(), "failed to setup controller") - go func() { - defer GinkgoRecover() - - err := mgr.Start(ctx) - Expect(err).NotTo(HaveOccurred(), "failed to start manager") - }() - }) - - AfterEach(func() { - defer cancel() - - err := k8sClient.Delete(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") + startManagers(GinkgoT(), mgr) }) It("should create a secret in the listener namespace containing proxy details, use it to populate env vars on the pod and should delete it as part of cleanup", func() { diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index 26918c9ee6..b0cd4c2c54 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -35,38 +35,15 @@ const ( var _ = Describe("Test 
AutoScalingRunnerSet controller", func() { var ctx context.Context - var cancel context.CancelFunc - autoscalingNS := new(corev1.Namespace) - autoscalingRunnerSet := new(v1alpha1.AutoscalingRunnerSet) - configSecret := new(corev1.Secret) + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet + var configSecret *corev1.Secret BeforeEach(func() { - ctx, cancel = context.WithCancel(context.TODO()) - autoscalingNS = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)}, - } - - err := k8sClient.Create(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") - - configSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "github-config-secret", - Namespace: autoscalingNS.Name, - }, - Data: map[string][]byte{ - "github_token": []byte(autoscalingRunnerSetTestGitHubToken), - }, - } - - err = k8sClient.Create(ctx, configSecret) - Expect(err).NotTo(HaveOccurred(), "failed to create config secret") - - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoscalingNS.Name, - MetricsBindAddress: "0", - }) - Expect(err).NotTo(HaveOccurred(), "failed to create manager") + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) controller := &AutoscalingRunnerSetReconciler{ Client: mgr.GetClient(), @@ -76,7 +53,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", ActionsClient: fake.NewMultiClient(), } - err = controller.SetupWithManager(mgr) + err := controller.SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred(), "failed to setup controller") min := 1 @@ -108,19 +85,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { err = k8sClient.Create(ctx, autoscalingRunnerSet) 
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") - go func() { - defer GinkgoRecover() - - err := mgr.Start(ctx) - Expect(err).NotTo(HaveOccurred(), "failed to start manager") - }() - }) - - AfterEach(func() { - defer cancel() - - err := k8sClient.Delete(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") + startManagers(GinkgoT(), mgr) }) Context("When creating a new AutoScalingRunnerSet", func() { @@ -438,36 +403,12 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { var _ = Describe("Test AutoscalingController creation failures", func() { Context("When autoscaling runner set creation fails on the client", func() { var ctx context.Context - var cancel context.CancelFunc - autoscalingNS := new(corev1.Namespace) - configSecret := new(corev1.Secret) + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace BeforeEach(func() { - ctx, cancel = context.WithCancel(context.TODO()) - autoscalingNS = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)}, - } - - err := k8sClient.Create(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") - - configSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "github-config-secret", - Namespace: autoscalingNS.Name, - }, - Data: map[string][]byte{ - "github_token": []byte(autoscalingRunnerSetTestGitHubToken), - }, - } - - err = k8sClient.Create(ctx, configSecret) - Expect(err).NotTo(HaveOccurred(), "failed to create config secret") - - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - MetricsBindAddress: "0", - }) - Expect(err).NotTo(HaveOccurred(), "failed to create manager") + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) controller := &AutoscalingRunnerSetReconciler{ Client: mgr.GetClient(), @@ -477,22 +418,10 @@ var _ = Describe("Test AutoscalingController 
creation failures", func() { DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", ActionsClient: fake.NewMultiClient(), } - err = controller.SetupWithManager(mgr) + err := controller.SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred(), "failed to setup controller") - go func() { - defer GinkgoRecover() - - err := mgr.Start(ctx) - Expect(err).NotTo(HaveOccurred(), "failed to start manager") - }() - }) - - AfterEach(func() { - defer cancel() - - err := k8sClient.Delete(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") + startManagers(GinkgoT(), mgr) }) It("It should be able to clean up if annotation related to scale set id does not exist", func() { @@ -583,57 +512,17 @@ var _ = Describe("Test AutoscalingController creation failures", func() { var _ = Describe("Test Client optional configuration", func() { Context("When specifying a proxy", func() { var ctx context.Context - var cancel context.CancelFunc - - autoscalingNS := new(corev1.Namespace) - configSecret := new(corev1.Secret) var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var configSecret *corev1.Secret + var controller *AutoscalingRunnerSetReconciler BeforeEach(func() { - ctx, cancel = context.WithCancel(context.TODO()) - autoscalingNS = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)}, - } + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) - err := k8sClient.Create(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet") - - configSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "github-config-secret", - Namespace: autoscalingNS.Name, - }, - Data: map[string][]byte{ - "github_token": []byte(autoscalingRunnerSetTestGitHubToken), - }, - } - - err = k8sClient.Create(ctx, configSecret) 
- Expect(err).NotTo(HaveOccurred(), "failed to create config secret") - - mgr, err = ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoscalingNS.Name, - MetricsBindAddress: "0", - }) - Expect(err).NotTo(HaveOccurred(), "failed to create manager") - - go func() { - defer GinkgoRecover() - - err := mgr.Start(ctx) - Expect(err).NotTo(HaveOccurred(), "failed to start manager") - }() - }) - - AfterEach(func() { - defer cancel() - - err := k8sClient.Delete(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet") - }) - - It("should be able to make requests to a server using a proxy", func() { - controller := &AutoscalingRunnerSetReconciler{ + controller = &AutoscalingRunnerSetReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Log: logf.Log, @@ -644,6 +533,10 @@ var _ = Describe("Test Client optional configuration", func() { err := controller.SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + startManagers(GinkgoT(), mgr) + }) + + It("should be able to make requests to a server using a proxy", func() { serverSuccessfullyCalled := false proxy := testserver.New(GinkgoT(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { serverSuccessfullyCalled = true @@ -681,7 +574,7 @@ var _ = Describe("Test Client optional configuration", func() { }, } - err = k8sClient.Create(ctx, autoscalingRunnerSet) + err := k8sClient.Create(ctx, autoscalingRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") // wait for server to be called @@ -695,17 +588,6 @@ var _ = Describe("Test Client optional configuration", func() { }) It("should be able to make requests to a server using a proxy with user info", func() { - controller := &AutoscalingRunnerSetReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Log: logf.Log, - ControllerNamespace: autoscalingNS.Name, - DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", - 
ActionsClient: actions.NewMultiClient("test", logr.Discard()), - } - err := controller.SetupWithManager(mgr) - Expect(err).NotTo(HaveOccurred(), "failed to setup controller") - serverSuccessfullyCalled := false proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { header := r.Header.Get("Proxy-Authorization") @@ -734,7 +616,7 @@ var _ = Describe("Test Client optional configuration", func() { }, } - err = k8sClient.Create(ctx, secretCredentials) + err := k8sClient.Create(ctx, secretCredentials) Expect(err).NotTo(HaveOccurred(), "failed to create secret credentials") min := 1 diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go index 3f1747ab18..b5e064b1aa 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go @@ -22,11 +22,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/manager" ) const ( - gh_token = "gh_token" timeout = time.Second * 10 interval = time.Millisecond * 250 runnerImage = "ghcr.io/actions/actions-runner:latest" @@ -87,42 +85,16 @@ func newExampleRunner(name, namespace, configSecretName string) *v1alpha1.Epheme var _ = Describe("EphemeralRunner", func() { Describe("Resource manipulation", func() { var ctx context.Context - var cancel context.CancelFunc - - autoscalingNS := new(corev1.Namespace) - configSecret := new(corev1.Secret) - - controller := new(EphemeralRunnerReconciler) - ephemeralRunner := new(v1alpha1.EphemeralRunner) + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var configSecret *corev1.Secret + var controller *EphemeralRunnerReconciler + var ephemeralRunner *v1alpha1.EphemeralRunner BeforeEach(func() { - ctx, cancel = context.WithCancel(context.Background()) - 
autoscalingNS = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testns-autoscaling-runner" + RandStringRunes(5), - }, - } - err := k8sClient.Create(ctx, autoscalingNS) - Expect(err).To(BeNil(), "failed to create test namespace for EphemeralRunner") - - configSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "github-config-secret", - Namespace: autoscalingNS.Name, - }, - Data: map[string][]byte{ - "github_token": []byte(gh_token), - }, - } - - err = k8sClient.Create(ctx, configSecret) - Expect(err).To(BeNil(), "failed to create config secret") - - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoscalingNS.Name, - MetricsBindAddress: "0", - }) - Expect(err).To(BeNil(), "failed to create manager") + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) controller = &EphemeralRunnerReconciler{ Client: mgr.GetClient(), @@ -131,26 +103,14 @@ var _ = Describe("EphemeralRunner", func() { ActionsClient: fake.NewMultiClient(), } - err = controller.SetupWithManager(mgr) + err := controller.SetupWithManager(mgr) Expect(err).To(BeNil(), "failed to setup controller") ephemeralRunner = newExampleRunner("test-runner", autoscalingNS.Name, configSecret.Name) err = k8sClient.Create(ctx, ephemeralRunner) Expect(err).To(BeNil(), "failed to create ephemeral runner") - go func() { - defer GinkgoRecover() - - err := mgr.Start(ctx) - Expect(err).To(BeNil(), "failed to start manager") - }() - }) - - AfterEach(func() { - defer cancel() - - err := k8sClient.Delete(ctx, autoscalingNS) - Expect(err).To(BeNil(), "failed to delete test namespace for EphemeralRunner") + startManagers(GinkgoT(), mgr) }) It("It should create/add all required resources for EphemeralRunner (finalizer, jit secret)", func() { @@ -668,52 +628,17 @@ var _ = Describe("EphemeralRunner", func() { Describe("Checking the API", func() { var ctx context.Context - var cancel 
context.CancelFunc - - autoscalingNS := new(corev1.Namespace) - configSecret := new(corev1.Secret) - - var mgr manager.Manager + var autoscalingNS *corev1.Namespace + var configSecret *corev1.Secret + var controller *EphemeralRunnerReconciler + var mgr ctrl.Manager BeforeEach(func() { - ctx, cancel = context.WithCancel(context.Background()) - autoscalingNS = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testns-autoscaling-runner" + RandStringRunes(5), - }, - } - err := k8sClient.Create(ctx, autoscalingNS) - Expect(err).To(BeNil(), "failed to create test namespace for EphemeralRunner") - - configSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "github-config-secret", - Namespace: autoscalingNS.Name, - }, - Data: map[string][]byte{ - "github_token": []byte(gh_token), - }, - } - - err = k8sClient.Create(ctx, configSecret) - Expect(err).To(BeNil(), "failed to create config secret") + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) - mgr, err = ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoscalingNS.Name, - MetricsBindAddress: "0", - }) - Expect(err).To(BeNil(), "failed to create manager") - }) - - AfterEach(func() { - defer cancel() - - err := k8sClient.Delete(ctx, autoscalingNS) - Expect(err).To(BeNil(), "failed to delete test namespace for EphemeralRunner") - }) - - It("It should set the Phase to Succeeded", func() { - controller := &EphemeralRunnerReconciler{ + controller = &EphemeralRunnerReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Log: logf.Log, @@ -732,20 +657,16 @@ var _ = Describe("EphemeralRunner", func() { ), ), } - err := controller.SetupWithManager(mgr) Expect(err).To(BeNil(), "failed to setup controller") - go func() { - defer GinkgoRecover() - - err := mgr.Start(ctx) - Expect(err).To(BeNil(), "failed to start manager") - }() + startManagers(GinkgoT(), mgr) + }) + It("It should set 
the Phase to Succeeded", func() { ephemeralRunner := newExampleRunner("test-runner", autoscalingNS.Name, configSecret.Name) - err = k8sClient.Create(ctx, ephemeralRunner) + err := k8sClient.Create(ctx, ephemeralRunner) Expect(err).To(BeNil()) pod := new(corev1.Pod) @@ -780,40 +701,15 @@ var _ = Describe("EphemeralRunner", func() { Describe("Pod proxy config", func() { var ctx context.Context - var cancel context.CancelFunc - - autoScalingNS := new(corev1.Namespace) - configSecret := new(corev1.Secret) - controller := new(EphemeralRunnerReconciler) + var mgr ctrl.Manager + var autoScalingNS *corev1.Namespace + var configSecret *corev1.Secret + var controller *EphemeralRunnerReconciler BeforeEach(func() { - ctx, cancel = context.WithCancel(context.Background()) - autoScalingNS = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testns-autoscaling-runner" + RandStringRunes(5), - }, - } - err := k8sClient.Create(ctx, autoScalingNS) - Expect(err).To(BeNil(), "failed to create test namespace for EphemeralRunner") - - configSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "github-config-secret", - Namespace: autoScalingNS.Name, - }, - Data: map[string][]byte{ - "github_token": []byte(gh_token), - }, - } - - err = k8sClient.Create(ctx, configSecret) - Expect(err).To(BeNil(), "failed to create config secret") - - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoScalingNS.Name, - MetricsBindAddress: "0", - }) - Expect(err).To(BeNil(), "failed to create manager") + ctx = context.Background() + autoScalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoScalingNS.Name) controller = &EphemeralRunnerReconciler{ Client: mgr.GetClient(), @@ -821,23 +717,10 @@ var _ = Describe("EphemeralRunner", func() { Log: logf.Log, ActionsClient: fake.NewMultiClient(), } - - err = controller.SetupWithManager(mgr) + err := controller.SetupWithManager(mgr) Expect(err).To(BeNil(), "failed to 
setup controller") - go func() { - defer GinkgoRecover() - - err := mgr.Start(ctx) - Expect(err).To(BeNil(), "failed to start manager") - }() - }) - - AfterEach(func() { - defer cancel() - - err := k8sClient.Delete(ctx, autoScalingNS) - Expect(err).To(BeNil(), "failed to delete test namespace for EphemeralRunner") + startManagers(GinkgoT(), mgr) }) It("uses an actions client with proxy transport", func() { diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index 571492b907..e2ad842b11 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -34,38 +34,15 @@ const ( var _ = Describe("Test EphemeralRunnerSet controller", func() { var ctx context.Context - var cancel context.CancelFunc - autoscalingNS := new(corev1.Namespace) - ephemeralRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) - configSecret := new(corev1.Secret) + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet + var configSecret *corev1.Secret BeforeEach(func() { - ctx, cancel = context.WithCancel(context.TODO()) - autoscalingNS = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-runnerset" + RandStringRunes(5)}, - } - - err := k8sClient.Create(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for EphemeralRunnerSet") - - configSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "github-config-secret", - Namespace: autoscalingNS.Name, - }, - Data: map[string][]byte{ - "github_token": []byte(ephemeralRunnerSetTestGitHubToken), - }, - } - - err = k8sClient.Create(ctx, configSecret) - Expect(err).NotTo(HaveOccurred(), "failed to create config secret") - - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoscalingNS.Name, - MetricsBindAddress: 
"0", - }) - Expect(err).NotTo(HaveOccurred(), "failed to create manager") + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) controller := &EphemeralRunnerSetReconciler{ Client: mgr.GetClient(), @@ -73,7 +50,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { Log: logf.Log, ActionsClient: fake.NewMultiClient(), } - err = controller.SetupWithManager(mgr) + err := controller.SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred(), "failed to setup controller") ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ @@ -103,19 +80,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Create(ctx, ephemeralRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet") - go func() { - defer GinkgoRecover() - - err := mgr.Start(ctx) - Expect(err).NotTo(HaveOccurred(), "failed to start manager") - }() - }) - - AfterEach(func() { - defer cancel() - - err := k8sClient.Delete(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for EphemeralRunnerSet") + startManagers(GinkgoT(), mgr) }) Context("When creating a new EphemeralRunnerSet", func() { @@ -595,38 +560,15 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func() { var ctx context.Context - var cancel context.CancelFunc - autoscalingNS := new(corev1.Namespace) - ephemeralRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) - configSecret := new(corev1.Secret) + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet + var configSecret *corev1.Secret BeforeEach(func() { - ctx, cancel = context.WithCancel(context.TODO()) - autoscalingNS = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-runnerset" + RandStringRunes(5)}, - 
} - - err := k8sClient.Create(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for EphemeralRunnerSet") - - configSecret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "github-config-secret", - Namespace: autoscalingNS.Name, - }, - Data: map[string][]byte{ - "github_token": []byte(ephemeralRunnerSetTestGitHubToken), - }, - } - - err = k8sClient.Create(ctx, configSecret) - Expect(err).NotTo(HaveOccurred(), "failed to create config secret") - - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Namespace: autoscalingNS.Name, - MetricsBindAddress: "0", - }) - Expect(err).NotTo(HaveOccurred(), "failed to create manager") + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) controller := &EphemeralRunnerSetReconciler{ Client: mgr.GetClient(), @@ -634,22 +576,10 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( Log: logf.Log, ActionsClient: actions.NewMultiClient("test", logr.Discard()), } - err = controller.SetupWithManager(mgr) + err := controller.SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred(), "failed to setup controller") - go func() { - defer GinkgoRecover() - - err := mgr.Start(ctx) - Expect(err).NotTo(HaveOccurred(), "failed to start manager") - }() - }) - - AfterEach(func() { - defer cancel() - - err := k8sClient.Delete(ctx, autoscalingNS) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for EphemeralRunnerSet") + startManagers(GinkgoT(), mgr) }) It("should create a proxy secret and delete the proxy secreat after the runner-set is deleted", func() { diff --git a/controllers/actions.github.com/helpers_test.go b/controllers/actions.github.com/helpers_test.go new file mode 100644 index 0000000000..4adbec6133 --- /dev/null +++ b/controllers/actions.github.com/helpers_test.go @@ -0,0 +1,71 @@ +package actionsgithubcom + +import ( + "context" + 
+ "github.com/onsi/ginkgo/v2" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +const defaultGitHubToken = "gh_token" + +func startManagers(t ginkgo.GinkgoTInterface, first manager.Manager, others ...manager.Manager) { + for _, mgr := range append([]manager.Manager{first}, others...) { + ctx, cancel := context.WithCancel(context.Background()) + + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { + return mgr.Start(ctx) + }) + + t.Cleanup(func() { + cancel() + require.NoError(t, g.Wait()) + }) + } +} + +func createNamespace(t ginkgo.GinkgoTInterface, client client.Client) (*corev1.Namespace, manager.Manager) { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)}, + } + + err := k8sClient.Create(context.Background(), ns) + require.NoError(t, err) + + t.Cleanup(func() { + err := k8sClient.Delete(context.Background(), ns) + require.NoError(t, err) + }) + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Namespace: ns.Name, + MetricsBindAddress: "0", + }) + require.NoError(t, err) + + return ns, mgr +} + +func createDefaultSecret(t ginkgo.GinkgoTInterface, client client.Client, namespace string) *corev1.Secret { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-config-secret", + Namespace: namespace, + }, + Data: map[string][]byte{ + "github_token": []byte(defaultGitHubToken), + }, + } + + err := k8sClient.Create(context.Background(), secret) + require.NoError(t, err) + + return secret +} diff --git a/go.mod b/go.mod index 2e4a792f5c..c21469d385 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( go.uber.org/zap v1.24.0 golang.org/x/net v0.7.0 golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 + golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4 gomodules.xyz/jsonpatch/v2 v2.2.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.26.1 diff --git a/go.sum b/go.sum index 7a42ba8130..2d2dfbee73 100644 --- a/go.sum +++ b/go.sum @@ -473,6 +473,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From 0464be2e12a92dbece494a9eae64a43a26ea4647 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 3 Mar 2023 05:43:03 -0500 Subject: [PATCH 103/561] Update runner to version 2.302.1 (#2294) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- Makefile | 2 +- runner/Makefile | 2 +- runner/VERSION | 2 +- test/e2e/e2e_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 7bb20d4263..e42d2ae79f 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ else endif DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1) VERSION ?= dev -RUNNER_VERSION ?= 2.301.1 +RUNNER_VERSION ?= 2.302.1 TARGETPLATFORM ?= $(shell arch) RUNNER_NAME ?= ${DOCKER_USER}/actions-runner RUNNER_TAG 
?= ${VERSION} diff --git a/runner/Makefile b/runner/Makefile index 1b59f237e1..acd5fd7091 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) -RUNNER_VERSION ?= 2.301.1 +RUNNER_VERSION ?= 2.302.1 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0 DOCKER_VERSION ?= 20.10.21 diff --git a/runner/VERSION b/runner/VERSION index b886075fa2..22155b3fbc 100644 --- a/runner/VERSION +++ b/runner/VERSION @@ -1 +1 @@ -2.301.1 +2.302.1 diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 9400381fc0..e4c3a233bc 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -41,7 +41,7 @@ var ( testResultCMNamePrefix = "test-result-" - RunnerVersion = "2.301.1" + RunnerVersion = "2.302.1" ) // If you're willing to run this test via VS Code "run test" or "debug test", From 789a741ddafdcee4197130d73451f0bd816c0d5c Mon Sep 17 00:00:00 2001 From: Ava Stancu Date: Fri, 3 Mar 2023 11:55:02 +0100 Subject: [PATCH 104/561] Added e2e workflow trigger on master push and on PRs (#2356) --- .github/workflows/e2e-test-linux-vm.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 09161e6943..85c282d38b 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -1,6 +1,10 @@ name: CI ARC E2E Linux VM Test on: + push: + branches: + - master + pull_request: workflow_dispatch: env: From 652233c7bb22ec208d611f9d0e2c7bfe313433a5 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Fri, 3 Mar 2023 12:00:18 +0100 Subject: [PATCH 105/561] Upgrading & pinning action versions (#2346) --- .github/workflows/publish-runner-scale-set.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish-runner-scale-set.yaml 
b/.github/workflows/publish-runner-scale-set.yaml index d32a3c0320..0508b3c560 100644 --- a/.github/workflows/publish-runner-scale-set.yaml +++ b/.github/workflows/publish-runner-scale-set.yaml @@ -129,7 +129,8 @@ jobs: echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT - name: Set up Helm - uses: azure/setup-helm@v3.3 + # Using https://github.com/Azure/setup-helm/releases/tag/v3.5 + uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 with: version: ${{ env.HELM_VERSION }} @@ -176,7 +177,8 @@ jobs: echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT - name: Set up Helm - uses: azure/setup-helm@v3.3 + # Using https://github.com/Azure/setup-helm/releases/tag/v3.5 + uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 with: version: ${{ env.HELM_VERSION }} From 61149c3b8ef59e77032fb45e330042d31147dd51 Mon Sep 17 00:00:00 2001 From: Chris Patterson Date: Fri, 3 Mar 2023 08:36:14 -0500 Subject: [PATCH 106/561] Adding parameter to configure the runner set name. 
(#2279) Co-authored-by: TingluoHuang --- .../v1alpha1/autoscalingrunnerset_types.go | 5 + ...ions.github.com_autoscalingrunnersets.yaml | 2 + .../templates/autoscalingrunnerset.yaml | 3 + .../tests/template_test.go | 47 +++++++ charts/gha-runner-scale-set/values.yaml | 3 + ...ions.github.com_autoscalingrunnersets.yaml | 2 + .../autoscalinglistener_controller.go | 1 + .../autoscalingrunnerset_controller.go | 66 +++++++++- .../autoscalingrunnerset_controller_test.go | 123 ++++++++++++++++++ github/actions/fake/client.go | 7 + 10 files changed, 252 insertions(+), 7 deletions(-) diff --git a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go index ba8af2fcbb..8f2bf1024c 100644 --- a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go +++ b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go @@ -57,6 +57,9 @@ type AutoscalingRunnerSetSpec struct { // +optional RunnerGroup string `json:"runnerGroup,omitempty"` + // +optional + RunnerScaleSetName string `json:"runnerScaleSetName,omitempty"` + // +optional Proxy *ProxyConfig `json:"proxy,omitempty"` @@ -205,6 +208,7 @@ func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string { GitHubConfigUrl string GitHubConfigSecret string RunnerGroup string + RunnerScaleSetName string Proxy *ProxyConfig GitHubServerTLS *GitHubServerTLSConfig Template corev1.PodTemplateSpec @@ -213,6 +217,7 @@ func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string { GitHubConfigUrl: ars.Spec.GitHubConfigUrl, GitHubConfigSecret: ars.Spec.GitHubConfigSecret, RunnerGroup: ars.Spec.RunnerGroup, + RunnerScaleSetName: ars.Spec.RunnerScaleSetName, Proxy: ars.Spec.Proxy, GitHubServerTLS: ars.Spec.GitHubServerTLS, Template: ars.Spec.Template, diff --git a/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml index 
0077540675..12c4b5b837 100644 --- a/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml +++ b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml @@ -86,6 +86,8 @@ spec: type: object runnerGroup: type: string + runnerScaleSetName: + type: string template: description: Required properties: diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index 478fc775ca..6ad3c6c8a2 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -17,6 +17,9 @@ spec: {{- with .Values.runnerGroup }} runnerGroup: {{ . }} {{- end }} + {{- with .Values.runnerScaleSetName }} + runnerScaleSetName: {{ . }} + {{- end }} {{- if .Values.proxy }} proxy: diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index c9939415f9..e6995dff10 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -310,6 +310,53 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) { assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image) } +func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "runnerScaleSetName": "test-runner-scale-set-name", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := 
helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, namespaceName, ars.Namespace) + assert.Equal(t, "test-runners", ars.Name) + + assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) + assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) + assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret) + assert.Equal(t, "test-runner-scale-set-name", ars.Spec.RunnerScaleSetName) + + assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") + + assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil") + assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil") + assert.Nil(t, ars.Spec.Proxy, "Proxy should be nil") + assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil") + + assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil") + + assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container") + assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image) +} + func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) { t.Parallel() diff --git a/charts/gha-runner-scale-set/values.yaml b/charts/gha-runner-scale-set/values.yaml index f3bbc21476..8845daa8a4 100644 --- a/charts/gha-runner-scale-set/values.yaml +++ b/charts/gha-runner-scale-set/values.yaml @@ -44,6 +44,9 @@ githubConfigSecret: # runnerGroup: "default" +## name of the runner scale set to create. 
Defaults to the helm release name +# runnerScaleSetName: "" + ## template is the PodSpec for each runner Pod template: spec: diff --git a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml index 0077540675..12c4b5b837 100644 --- a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml +++ b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml @@ -86,6 +86,8 @@ spec: type: object runnerGroup: type: string + runnerScaleSetName: + type: string template: description: Required properties: diff --git a/controllers/actions.github.com/autoscalinglistener_controller.go b/controllers/actions.github.com/autoscalinglistener_controller.go index 3110a74818..5ad3f0c3df 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller.go +++ b/controllers/actions.github.com/autoscalinglistener_controller.go @@ -99,6 +99,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl. 
} log.Info("Successfully removed finalizer after cleanup") + return ctrl.Result{}, nil } if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) { diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 1c5c32eb0c..1c77acd904 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -46,6 +46,7 @@ const ( LabelKeyRunnerSpecHash = "runner-spec-hash" autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" runnerScaleSetIdKey = "runner-scale-set-id" + runnerScaleSetNameKey = "runner-scale-set-name" runnerScaleSetRunnerGroupNameKey = "runner-scale-set-runner-group-name" ) @@ -123,6 +124,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl } log.Info("Successfully removed finalizer after cleanup") + return ctrl.Result{}, nil } if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) { @@ -158,6 +160,13 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl return r.updateRunnerScaleSetRunnerGroup(ctx, autoscalingRunnerSet, log) } + // Make sure the runner scale set name is up to date + currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameKey] + if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) { + log.Info("AutoScalingRunnerSet runner scale set name changed. 
Updating the runner scale set.") + return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log) + } + secret := new(corev1.Secret) if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, secret); err != nil { log.Error(err, "Failed to find GitHub config secret.", @@ -297,11 +306,14 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { logger.Info("Creating a new runner scale set") actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) + if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 { + autoscalingRunnerSet.Spec.RunnerScaleSetName = autoscalingRunnerSet.Name + } if err != nil { logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set") return ctrl.Result{}, err } - runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, autoscalingRunnerSet.Name) + runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, autoscalingRunnerSet.Spec.RunnerScaleSetName) if err != nil { logger.Error(err, "Failed to get runner scale set from Actions service") return ctrl.Result{}, err @@ -322,11 +334,11 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex runnerScaleSet, err = actionsClient.CreateRunnerScaleSet( ctx, &actions.RunnerScaleSet{ - Name: autoscalingRunnerSet.Name, + Name: autoscalingRunnerSet.Spec.RunnerScaleSetName, RunnerGroupId: runnerGroupId, Labels: []actions.Label{ { - Name: autoscalingRunnerSet.Name, + Name: autoscalingRunnerSet.Spec.RunnerScaleSetName, Type: "System", }, }, @@ -346,16 +358,20 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex autoscalingRunnerSet.Annotations = map[string]string{} } - logger.Info("Adding runner scale set ID and 
runner group name as an annotation") + logger.Info("Adding runner scale set ID, name and runner group name as an annotation") if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { + obj.Annotations[runnerScaleSetNameKey] = runnerScaleSet.Name obj.Annotations[runnerScaleSetIdKey] = strconv.Itoa(runnerScaleSet.Id) obj.Annotations[runnerScaleSetRunnerGroupNameKey] = runnerScaleSet.RunnerGroupName }); err != nil { - logger.Error(err, "Failed to add runner scale set ID and runner group name as an annotation") + logger.Error(err, "Failed to add runner scale set ID, name and runner group name as an annotation") return ctrl.Result{}, err } - logger.Info("Updated with runner scale set ID and runner group name as an annotation") + logger.Info("Updated with runner scale set ID, name and runner group name as an annotation", + "id", runnerScaleSet.Id, + "name", runnerScaleSet.Name, + "runnerGroupName", runnerScaleSet.RunnerGroupName) return ctrl.Result{}, nil } @@ -383,7 +399,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con runnerGroupId = int(runnerGroup.ID) } - updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetId, &actions.RunnerScaleSet{Name: autoscalingRunnerSet.Name, RunnerGroupId: runnerGroupId}) + updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetId, &actions.RunnerScaleSet{RunnerGroupId: runnerGroupId}) if err != nil { logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetId) return ctrl.Result{}, err @@ -401,6 +417,42 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con return ctrl.Result{}, nil } +func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { + runnerScaleSetId, err := 
strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) + if err != nil { + logger.Error(err, "Failed to parse runner scale set ID") + return ctrl.Result{}, err + } + + if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 { + logger.Info("Runner scale set name is not specified, skipping") + return ctrl.Result{}, nil + } + + actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) + if err != nil { + logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set") + return ctrl.Result{}, err + } + + updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetId, &actions.RunnerScaleSet{Name: autoscalingRunnerSet.Spec.RunnerScaleSetName}) + if err != nil { + logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetId) + return ctrl.Result{}, err + } + + logger.Info("Updating runner scale set name as an annotation") + if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { + obj.Annotations[runnerScaleSetNameKey] = updatedRunnerScaleSet.Name + }); err != nil { + logger.Error(err, "Failed to update runner scale set name annotation") + return ctrl.Result{}, err + } + + logger.Info("Updated runner scale set with match name", "name", updatedRunnerScaleSet.Name) + return ctrl.Result{}, nil +} + func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error { logger.Info("Deleting the runner scale set from Actions service") runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index b0cd4c2c54..e999fc7e6c 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ 
b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -400,6 +400,129 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { }) }) +var _ = Describe("Test AutoScalingController updates", func() { + Context("Creating autoscaling runner set with RunnerScaleSetName set", func() { + var ctx context.Context + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet + var configSecret *corev1.Secret + + BeforeEach(func() { + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) + + controller := &AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ControllerNamespace: autoscalingNS.Name, + DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", + ActionsClient: fake.NewMultiClient( + fake.WithDefaultClient( + fake.NewFakeClient( + fake.WithUpdateRunnerScaleSet( + &actions.RunnerScaleSet{ + Id: 1, + Name: "testset_update", + RunnerGroupId: 1, + RunnerGroupName: "testgroup", + Labels: []actions.Label{{Type: "test", Name: "test"}}, + RunnerSetting: actions.RunnerSetting{}, + CreatedOn: time.Now(), + RunnerJitConfigUrl: "test.test.test", + Statistics: nil, + }, + nil, + ), + ), + nil, + ), + ), + } + err := controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + startManagers(GinkgoT(), mgr) + }) + + It("It should be create AutoScalingRunnerSet and has annotation for the RunnerScaleSetName", func() { + min := 1 + max := 10 + autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + MaxRunners: &max, + MinRunners: &min, + RunnerScaleSetName: 
"testset", + RunnerGroup: "testgroup", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err := k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + // Wait for the AutoScalingRunnerSet to be created with right annotation + ars := new(v1alpha1.AutoscalingRunnerSet) + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, ars) + if err != nil { + return "", err + } + + if val, ok := ars.Annotations[runnerScaleSetNameKey]; ok { + return val, nil + } + + return "", nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(autoscalingRunnerSet.Spec.RunnerScaleSetName), "AutoScalingRunnerSet should have annotation for the RunnerScaleSetName") + + update := autoscalingRunnerSet.DeepCopy() + update.Spec.RunnerScaleSetName = "testset_update" + err = k8sClient.Patch(ctx, update, client.MergeFrom(autoscalingRunnerSet)) + Expect(err).NotTo(HaveOccurred(), "failed to update AutoScalingRunnerSet") + + // Wait for the AutoScalingRunnerSet to be updated with right annotation + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, ars) + if err != nil { + return "", err + } + + if val, ok := ars.Annotations[runnerScaleSetNameKey]; ok { + return val, nil + } + + return "", nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(update.Spec.RunnerScaleSetName), "AutoScalingRunnerSet should have a updated annotation for the RunnerScaleSetName") + }) + }) +}) + var _ = Describe("Test AutoscalingController creation failures", func() { Context("When autoscaling runner set 
creation fails on the client", func() { var ctx context.Context diff --git a/github/actions/fake/client.go b/github/actions/fake/client.go index 5d7e22b790..0729425d54 100644 --- a/github/actions/fake/client.go +++ b/github/actions/fake/client.go @@ -38,6 +38,13 @@ func WithCreateRunnerScaleSet(scaleSet *actions.RunnerScaleSet, err error) Optio } } +func WithUpdateRunnerScaleSet(scaleSet *actions.RunnerScaleSet, err error) Option { + return func(f *FakeClient) { + f.updateRunnerScaleSetResult.RunnerScaleSet = scaleSet + f.updateRunnerScaleSetResult.err = err + } +} + var defaultRunnerScaleSet = &actions.RunnerScaleSet{ Id: 1, Name: "testset", From 0af43dfbe61c0821cc10f02cc7bb9d7482436e87 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Fri, 3 Mar 2023 23:18:07 +0900 Subject: [PATCH 107/561] Correct and simplify a sentence in the scheduled overrides doc (#2323) --- docs/automatically-scaling-runners.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/automatically-scaling-runners.md b/docs/automatically-scaling-runners.md index fc860063ef..05c0214fda 100644 --- a/docs/automatically-scaling-runners.md +++ b/docs/automatically-scaling-runners.md @@ -491,7 +491,7 @@ In case you have a more complex scenario, try writing two or more entries under The earlier entry is prioritized higher than later entries. So you usually define one-time overrides at the top of your list, then yearly, monthly, weekly, and lastly daily overrides. -A common use case for this may be to have 1 override to scale to 0 during the week outside of core business hours and another override to scale to 0 during all hours of the weekend. +A common use case for this may be to have 1 override to scale to 0 during non-working hours and another override to scale to 0 on weekends. 
## Configuring automatic termination From d4bd6840f0ac0ab6a6c201b4541fd1b5a1bce717 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Fri, 3 Mar 2023 13:05:51 -0500 Subject: [PATCH 108/561] ADR for Limit cluster role permission on Secerts. (#2275) --- ...023-02-10-limit-manager-role-permission.md | 95 +++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 adrs/2023-02-10-limit-manager-role-permission.md diff --git a/adrs/2023-02-10-limit-manager-role-permission.md b/adrs/2023-02-10-limit-manager-role-permission.md new file mode 100644 index 0000000000..879f1e07c7 --- /dev/null +++ b/adrs/2023-02-10-limit-manager-role-permission.md @@ -0,0 +1,95 @@ +# ADR 0007: Limit Permissions for Service Accounts in Actions-Runner-Controller +**Date**: 2023-02-10 + +**Status**: Pending + +## Context + +- `actions-runner-controller` is a Kubernetes CRD (with controller) built using https://github.com/kubernetes-sigs/controller-runtime + +- [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) has a default cache based k8s API client.Reader to make query k8s API server more efficiency. + +- The cache-based API client requires cluster scope `list` and `watch` permission for any resource the controller may query. + +- This documentation only scopes to the AutoscalingRunnerSet CRD and its controller. + +## Service accounts and their role binding in actions-runner-controller + +There are 3 service accounts involved for a working `AutoscalingRunnerSet` based `actions-runner-controller` + +1. Service account for each Ephemeral runner Pod + +This should have the lowest privilege (not any `RoleBinding` nor `ClusterRoleBinding`) by default, in the case of `containerMode=kubernetes`, it will get certain write permission with `RoleBinding` to limit the permission to a single namespace. 
+ +> References: +> - ./charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml +> - ./charts/gha-runner-scale-set/templates/kube_mode_role.yaml +> - ./charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml +> - ./charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml + +2. Service account for AutoScalingListener Pod + +This has a `RoleBinding` to a single namespace with a `Role` that has permission to `PATCH` `EphemeralRunnerSet` and `EphemeralRunner`. + +3. Service account for the controller manager + +Since the CRD controller is a singleton installed in the cluster that manages the CRD across multiple namespaces by default, the service account of the controller manager pod has a `ClusterRoleBinding` to a `ClusterRole` with broader permissions. + +The current `ClusterRole` has the following permissions: + +- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingRunnerSets` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingListeners` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunnerSets` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunners` (with `Status` and `Finalizer` sub-resource) + +- Get/List/Create/Delete/Update/Patch/Watch on `Pods` (with `Status` sub-resource) +- **Get/List/Create/Delete/Update/Patch/Watch on `Secrets`** +- Get/List/Create/Delete/Update/Patch/Watch on `Roles` +- Get/List/Create/Delete/Update/Patch/Watch on `RoleBindings` +- Get/List/Create/Delete/Update/Patch/Watch on `ServiceAccounts` + +> Full list can be found at: https://github.com/actions/actions-runner-controller/blob/facae69e0b189d3b5dd659f36df8a829516d2896/charts/actions-runner-controller-2/templates/manager_role.yaml + +## Limit cluster role permission on Secrets + +The cluster scope `List` `Secrets` permission might be a blocker for adopting 
`actions-runner-controller` for certain customers as they may have certain restriction in their cluster that simply doesn't allow any service account to have cluster scope `List Secrets` permission. + +To help these customers and improve security for `actions-runner-controller` in general, we will try to limit the `ClusterRole` permission of the controller manager's service account down to the following: + +- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingRunnerSets` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingListeners` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunnerSets` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunners` (with `Status` and `Finalizer` sub-resource) + +- List/Watch on `Pods` +- List/Watch on `Roles` +- List/Watch on `RoleBindings` +- List/Watch on `ServiceAccounts` + +> We will change the default cache-based client to bypass cache on reading `Secrets`, so we can eliminate the need for `List` and `Watch` `Secrets` permission in cluster scope. + +Introduce a new `Role` per `AutoScalingRunnerSet` installation and `RoleBinding` the `Role` with the controller's service account in the namespace that each `AutoScalingRunnerSet` deployed with the following permission. + +- Get/Create/Delete/Update/Patch on `Secrets` +- Get/Create/Delete/Update/Patch on `Pods` +- Get/Create/Delete/Update/Patch on `Roles` +- Get/Create/Delete/Update/Patch on `RoleBindings` +- Get/Create/Delete/Update/Patch on `ServiceAccounts` + +The `Role` and `RoleBinding` creation will happen during `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set` to grant the controller's service account required permissions to operate in the namespace the `AutoScalingRunnerSet` deployed. 
+ +## Install ARC to only watch/react resources in a single namespace + +In case the user doesn't want to have any `ClusterRole`, they can choose to install the `actions-runner-controller` in a mode that only requires a `Role` with `RoleBinding` in a particular namespace. + +In this mode, the `actions-runner-controller` will only be able to watch the `AutoScalingRunnerSet` resource in a single namespace. + +If you want to deploy multiple `AutoScalingRunnerSet` into different namespaces, you will need to install `actions-runner-controller` in this mode multiple times as well and have each installation watch the namespace you want to deploy an `AutoScalingRunnerSet` + +You will install `actions-runner-controller` with something like `helm install arc --set watchSingleNamespace=TestNamespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller` (the `TestNamespace` namespace needs to be created first). + +You will deploy the `AutoScalingRunnerSet` with something like `helm install demo --namespace TestNamespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set` + +In this mode, you will end up with a manager `Role` that has all Get/List/Create/Delete/Update/Patch/Watch permissions on resources we need, and a `RoleBinding` to bind the `Role` with the controller `ServiceAccount` in the watched single namespace, ex: `TestNamespace` in the above example. + +The downside of this mode is when you have multiple controllers deployed, they will still use the same version of the CRD. So you will need to make sure every controller you deployed has to be the same version as each other. 
\ No newline at end of file From 71c8eac90571de7fa46d0d05925882accb88013d Mon Sep 17 00:00:00 2001 From: Piotr Palka Date: Tue, 7 Mar 2023 02:20:46 +0700 Subject: [PATCH 109/561] Fix webhook server logging (#2320) Co-authored-by: Yusuke Kuoka Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- cmd/githubwebhookserver/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/githubwebhookserver/main.go b/cmd/githubwebhookserver/main.go index 2845772032..1245b8230b 100644 --- a/cmd/githubwebhookserver/main.go +++ b/cmd/githubwebhookserver/main.go @@ -124,7 +124,7 @@ func main() { if watchNamespace == "" { logger.Info("-watch-namespace is empty. HorizontalRunnerAutoscalers in all the namespaces are watched, cached, and considered as scale targets.") } else { - logger.Info("-watch-namespace is %q. Only HorizontalRunnerAutoscalers in %q are watched, cached, and considered as scale targets.") + logger.Info("-watch-namespace is %q. Only HorizontalRunnerAutoscalers in %q are watched, cached, and considered as scale targets.", watchNamespace, watchNamespace) } ctrl.SetLogger(logger) From 8cff895acc7a20181d4d017bd791f5355abf44bf Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Mon, 6 Mar 2023 19:21:22 +0000 Subject: [PATCH 110/561] Apply proxy settings from environment in listener (#2366) Co-authored-by: Tingluo Huang --- cmd/githubrunnerscalesetlistener/main.go | 16 ++- cmd/githubrunnerscalesetlistener/main_test.go | 109 ++++++++++++++++++ 2 files changed, 123 insertions(+), 2 deletions(-) diff --git a/cmd/githubrunnerscalesetlistener/main.go b/cmd/githubrunnerscalesetlistener/main.go index ed9f145f33..3813617a79 100644 --- a/cmd/githubrunnerscalesetlistener/main.go +++ b/cmd/githubrunnerscalesetlistener/main.go @@ -19,6 +19,8 @@ package main import ( "context" "fmt" + "net/http" + "net/url" "os" "os/signal" "syscall" @@ -28,6 +30,7 @@ import ( "github.com/actions/actions-runner-controller/logging" "github.com/go-logr/logr" 
"github.com/kelseyhightower/envconfig" + "golang.org/x/net/http/httpproxy" ) type RunnerScaleSetListenerConfig struct { @@ -84,8 +87,8 @@ func run(rc RunnerScaleSetListenerConfig, logger logr.Logger) error { } } - actionsServiceClient, err := actions.NewClient( - rc.ConfigureUrl, + actionsServiceClient, err := newActionsClientFromConfig( + rc, creds, actions.WithUserAgent(fmt.Sprintf("actions-runner-controller/%s", build.Version)), actions.WithLogger(logger), @@ -155,3 +158,12 @@ func validateConfig(config *RunnerScaleSetListenerConfig) error { return nil } + +func newActionsClientFromConfig(config RunnerScaleSetListenerConfig, creds *actions.ActionsAuth, options ...actions.ClientOption) (*actions.Client, error) { + proxyFunc := httpproxy.FromEnvironment().ProxyFunc() + options = append(options, actions.WithProxy(func(req *http.Request) (*url.URL, error) { + return proxyFunc(req.URL) + })) + + return actions.NewClient(config.ConfigureUrl, creds, options...) +} diff --git a/cmd/githubrunnerscalesetlistener/main_test.go b/cmd/githubrunnerscalesetlistener/main_test.go index bd2d71879f..619bfeb69f 100644 --- a/cmd/githubrunnerscalesetlistener/main_test.go +++ b/cmd/githubrunnerscalesetlistener/main_test.go @@ -2,9 +2,15 @@ package main import ( "fmt" + "net/http" + "net/http/httptest" + "os" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/actions/actions-runner-controller/github/actions" ) func TestConfigValidationMinMax(t *testing.T) { @@ -90,3 +96,106 @@ func TestConfigValidationConfigUrl(t *testing.T) { assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl") } + +func TestProxySettings(t *testing.T) { + t.Run("http", func(t *testing.T) { + wentThroughProxy := false + + proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wentThroughProxy = true + })) + t.Cleanup(func() { + proxy.Close() + }) + + prevProxy := 
os.Getenv("http_proxy") + os.Setenv("http_proxy", proxy.URL) + defer os.Setenv("http_proxy", prevProxy) + + config := RunnerScaleSetListenerConfig{ + ConfigureUrl: "https://github.com/org/repo", + } + creds := &actions.ActionsAuth{ + Token: "token", + } + + client, err := newActionsClientFromConfig(config, creds) + require.NoError(t, err) + + req, err := http.NewRequest(http.MethodGet, "http://example.com", nil) + require.NoError(t, err) + _, err = client.Do(req) + require.NoError(t, err) + + assert.True(t, wentThroughProxy) + }) + + t.Run("https", func(t *testing.T) { + wentThroughProxy := false + + proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wentThroughProxy = true + })) + t.Cleanup(func() { + proxy.Close() + }) + + prevProxy := os.Getenv("https_proxy") + os.Setenv("https_proxy", proxy.URL) + defer os.Setenv("https_proxy", prevProxy) + + config := RunnerScaleSetListenerConfig{ + ConfigureUrl: "https://github.com/org/repo", + } + creds := &actions.ActionsAuth{ + Token: "token", + } + + client, err := newActionsClientFromConfig(config, creds, actions.WithRetryMax(0)) + require.NoError(t, err) + + req, err := http.NewRequest(http.MethodGet, "https://example.com", nil) + require.NoError(t, err) + + _, err = client.Do(req) + // proxy doesn't support https + assert.Error(t, err) + assert.True(t, wentThroughProxy) + }) + + t.Run("no_proxy", func(t *testing.T) { + wentThroughProxy := false + + proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wentThroughProxy = true + })) + t.Cleanup(func() { + proxy.Close() + }) + + prevProxy := os.Getenv("http_proxy") + os.Setenv("http_proxy", proxy.URL) + defer os.Setenv("http_proxy", prevProxy) + + prevNoProxy := os.Getenv("no_proxy") + os.Setenv("no_proxy", "example.com") + defer os.Setenv("no_proxy", prevNoProxy) + + config := RunnerScaleSetListenerConfig{ + ConfigureUrl: "https://github.com/org/repo", + } + creds := &actions.ActionsAuth{ 
+ Token: "token", + } + + client, err := newActionsClientFromConfig(config, creds) + require.NoError(t, err) + + req, err := http.NewRequest(http.MethodGet, "http://example.com", nil) + require.NoError(t, err) + + _, err = client.Do(req) + require.NoError(t, err) + assert.False(t, wentThroughProxy) + }) +} From 7a323facf932325e13f89d5e5fc346b3b06cc18e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 07:38:53 +0900 Subject: [PATCH 111/561] chore(deps): bump github.com/teambition/rrule-go from 1.8.0 to 1.8.2 (#2230) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c21469d385..2f913701e8 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/stretchr/testify v1.8.2 - github.com/teambition/rrule-go v1.8.0 + github.com/teambition/rrule-go v1.8.2 go.uber.org/multierr v1.7.0 go.uber.org/zap v1.24.0 golang.org/x/net v0.7.0 diff --git a/go.sum b/go.sum index 2d2dfbee73..1f1c989649 100644 --- a/go.sum +++ b/go.sum @@ -354,8 +354,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/teambition/rrule-go v1.8.0 h1:a/IX5s56hGkFF+nRlJUooZU/45OTeeldBGL29nDKIHw= -github.com/teambition/rrule-go v1.8.0/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4= +github.com/teambition/rrule-go v1.8.2 h1:lIjpjvWTj9fFUZCmuoVDrKVOtdiyzbzc93qTmRVe/J8= +github.com/teambition/rrule-go v1.8.2/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4= 
github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= From 0686a00ce489a326267caf04eddf845daf0a92f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 07:39:31 +0900 Subject: [PATCH 112/561] chore(deps): bump github.com/gruntwork-io/terratest from 0.41.9 to 0.41.11 (#2335) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2f913701e8..e4aa1142b4 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 - github.com/gruntwork-io/terratest v0.41.9 + github.com/gruntwork-io/terratest v0.41.11 github.com/hashicorp/go-retryablehttp v0.7.2 github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 diff --git a/go.sum b/go.sum index 1f1c989649..88afb0f782 100644 --- a/go.sum +++ b/go.sum @@ -204,8 +204,8 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro= github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78= -github.com/gruntwork-io/terratest v0.41.9 h1:jyygu23iLcEFjGQhlvRx4R0EJVqOoriP+Ire4U9cZA0= -github.com/gruntwork-io/terratest v0.41.9/go.mod h1:qH1xkPTTGx30XkMHw8jAVIbzqheSjIa5IyiTwSV2vKI= +github.com/gruntwork-io/terratest v0.41.11 h1:EAHiK6PFWJCVkgW2yUompjSsZQzA0CfBcuqIaXtZdpk= +github.com/gruntwork-io/terratest v0.41.11/go.mod 
h1:qH1xkPTTGx30XkMHw8jAVIbzqheSjIa5IyiTwSV2vKI= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= From 608f6430af5df737481e2b6f6f0f0897aed4cdce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 11:23:37 +0100 Subject: [PATCH 113/561] chore(deps): bump github.com/golang-jwt/jwt/v4 from 4.4.1 to 4.5.0 (#2367) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index e4aa1142b4..0b92c55384 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/evanphx/json-patch v4.12.0+incompatible github.com/go-logr/logr v1.2.3 - github.com/golang-jwt/jwt/v4 v4.4.1 + github.com/golang-jwt/jwt/v4 v4.5.0 github.com/google/go-cmp v0.5.9 github.com/google/go-github/v47 v47.1.0 github.com/google/go-github/v50 v50.0.0 diff --git a/go.sum b/go.sum index 88afb0f782..f103ff8348 100644 --- a/go.sum +++ b/go.sum @@ -125,8 +125,9 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod 
h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= From 1a8bea9a96934296f0db1a1b424870ac215ca977 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 11:24:20 +0100 Subject: [PATCH 114/561] chore(deps): bump golang.org/x/net from 0.7.0 to 0.8.0 (#2368) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 0b92c55384..00991fa0ae 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/teambition/rrule-go v1.8.2 go.uber.org/multierr v1.7.0 go.uber.org/zap v1.24.0 - golang.org/x/net v0.7.0 + golang.org/x/net v0.8.0 golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 gomodules.xyz/jsonpatch/v2 v2.2.0 @@ -88,9 +88,9 @@ require ( github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/term v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/go.sum b/go.sum index f103ff8348..f6437efd71 100644 --- a/go.sum +++ b/go.sum @@ -453,8 +453,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx 
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -523,12 +523,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term 
v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -536,8 +536,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 356ba2ba6815be3494a1a3effde60e63de8e3862 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 11:27:23 +0100 Subject: [PATCH 115/561] chore(deps): bump k8s.io/client-go from 0.26.1 to 0.26.2 (#2370) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod 
b/go.mod index 00991fa0ae..fc7e26566e 100644 --- a/go.mod +++ b/go.mod @@ -31,9 +31,9 @@ require ( golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 gomodules.xyz/jsonpatch/v2 v2.2.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.26.1 - k8s.io/apimachinery v0.26.1 - k8s.io/client-go v0.26.1 + k8s.io/api v0.26.2 + k8s.io/apimachinery v0.26.2 + k8s.io/client-go v0.26.2 sigs.k8s.io/controller-runtime v0.14.4 sigs.k8s.io/yaml v1.3.0 ) diff --git a/go.sum b/go.sum index f6437efd71..f158901b4c 100644 --- a/go.sum +++ b/go.sum @@ -704,14 +704,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= -k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= +k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= +k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM= -k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= -k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= -k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= +k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= +k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= +k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= 
k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4= k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU= k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= From 61f55b3c9c93cc9c17121f3e2f137d0f7c15660e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 11:27:54 +0100 Subject: [PATCH 116/561] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.7.0 to 2.9.0 (#2369) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 9 ++++++--- go.sum | 18 ++++++++++++------ 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index fc7e26566e..a2ac2be1eb 100644 --- a/go.mod +++ b/go.mod @@ -18,8 +18,8 @@ require ( github.com/hashicorp/go-retryablehttp v0.7.2 github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/ginkgo/v2 v2.7.0 - github.com/onsi/gomega v1.25.0 + github.com/onsi/ginkgo/v2 v2.9.0 + github.com/onsi/gomega v1.27.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/stretchr/testify v1.8.2 @@ -28,7 +28,7 @@ require ( go.uber.org/zap v1.24.0 golang.org/x/net v0.8.0 golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 + golang.org/x/sync v0.1.0 gomodules.xyz/jsonpatch/v2 v2.2.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.26.2 @@ -54,6 +54,7 @@ require ( github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.19.14 // indirect github.com/go-sql-driver/mysql v1.4.1 // indirect + github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect @@ -61,6 +62,7 @@ require ( github.com/google/go-github/v45 
v45.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.1.0 // indirect + github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect github.com/gruntwork-io/go-commons v0.8.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -92,6 +94,7 @@ require ( golang.org/x/term v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.6.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index f158901b4c..1078dcde19 100644 --- a/go.sum +++ b/go.sum @@ -121,6 +121,7 @@ github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/ github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -195,6 +196,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -221,6 +224,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -289,12 +293,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= -github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/ginkgo/v2 v2.9.0 h1:Tugw2BKlNHTMfG+CheOITkYvk4LAh6MFOvikhGVnhE8= +github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk= github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y= -github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754= +github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -474,8 +478,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -587,6 +591,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 7e8407ae35ad3bdbfeb024c6bdf374fdb5f4dc20 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Tue, 7 Mar 2023 12:05:25 +0100 Subject: [PATCH 117/561] Update gomega with new ginkgo version (#2373) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a2ac2be1eb..90f6c0e072 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 github.com/onsi/ginkgo/v2 v2.9.0 - github.com/onsi/gomega v1.27.1 + github.com/onsi/gomega v1.27.2 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/stretchr/testify v1.8.2 diff --git a/go.sum b/go.sum index 1078dcde19..b09cc55aa7 100644 --- a/go.sum +++ b/go.sum @@ -297,8 +297,8 @@ github.com/onsi/ginkgo/v2 v2.9.0 h1:Tugw2BKlNHTMfG+CheOITkYvk4LAh6MFOvikhGVnhE8= github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754= -github.com/onsi/gomega 
v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw= +github.com/onsi/gomega v1.27.2 h1:SKU0CXeKE/WVgIV1T61kSa3+IRE8Ekrv9rdXDwwTqnY= +github.com/onsi/gomega v1.27.2/go.mod h1:5mR3phAHpkAVIDkHEUBY6HGVsU+cpcEscrGPB4oPlZI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= From 913874ced44fbecb303c20d61ebae60ec64ba392 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Thu, 9 Mar 2023 09:02:05 -0500 Subject: [PATCH 118/561] Trim slash for configure URL. (#2381) --- .../templates/autoscalingrunnerset.yaml | 2 +- .../tests/template_test.go | 28 ++++++++++++ github/actions/config.go | 4 +- github/actions/config_test.go | 43 ++++++++++++++++++- 4 files changed, 73 insertions(+), 4 deletions(-) diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index 6ad3c6c8a2..e7f66156ed 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -12,7 +12,7 @@ metadata: labels: {{- include "gha-runner-scale-set.labels" . | nindent 4 }} spec: - githubConfigUrl: {{ required ".Values.githubConfigUrl is required" .Values.githubConfigUrl }} + githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }} githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }} {{- with .Values.runnerGroup }} runnerGroup: {{ . 
}} diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index e6995dff10..01b522c6dc 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -869,3 +869,31 @@ func TestTemplateNamingConstraints(t *testing.T) { }) } } + +func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions/", + "githubConfigSecret.github_token": "gh_token12345", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Equal(t, namespaceName, ars.Namespace) + assert.Equal(t, "test-runners", ars.Name) + assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) +} diff --git a/github/actions/config.go b/github/actions/config.go index 204fa0a4a2..f19cb17f4c 100644 --- a/github/actions/config.go +++ b/github/actions/config.go @@ -29,7 +29,7 @@ type GitHubConfig struct { } func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) { - u, err := url.Parse(in) + u, err := url.Parse(strings.Trim(in, "/")) if err != nil { return nil, err } @@ -45,7 +45,7 @@ func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) { invalidURLError := fmt.Errorf("%q: %w", u.String(), ErrInvalidGitHubConfigURL) - pathParts := strings.Split(strings.TrimPrefix(u.Path, "/"), "/") + pathParts := strings.Split(strings.Trim(u.Path, "/"), "/") switch len(pathParts) { case 1: // 
Organization diff --git a/github/actions/config_test.go b/github/actions/config_test.go index e64928e262..e21f7e9e94 100644 --- a/github/actions/config_test.go +++ b/github/actions/config_test.go @@ -3,6 +3,7 @@ package actions_test import ( "errors" "net/url" + "strings" "testing" "github.com/actions/actions-runner-controller/github/actions" @@ -26,6 +27,16 @@ func TestGitHubConfig(t *testing.T) { IsHosted: true, }, }, + { + configURL: "https://github.com/org/repo/", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeRepository, + Enterprise: "", + Organization: "org", + Repository: "repo", + IsHosted: true, + }, + }, { configURL: "https://github.com/org", expected: &actions.GitHubConfig{ @@ -46,6 +57,16 @@ func TestGitHubConfig(t *testing.T) { IsHosted: true, }, }, + { + configURL: "https://github.com/enterprises/my-enterprise/", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeEnterprise, + Enterprise: "my-enterprise", + Organization: "", + Repository: "", + IsHosted: true, + }, + }, { configURL: "https://www.github.com/org", expected: &actions.GitHubConfig{ @@ -56,6 +77,16 @@ func TestGitHubConfig(t *testing.T) { IsHosted: true, }, }, + { + configURL: "https://www.github.com/org/", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeOrganization, + Enterprise: "", + Organization: "org", + Repository: "", + IsHosted: true, + }, + }, { configURL: "https://github.localhost/org", expected: &actions.GitHubConfig{ @@ -76,11 +107,21 @@ func TestGitHubConfig(t *testing.T) { IsHosted: false, }, }, + { + configURL: "https://my-ghes.com/org/", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeOrganization, + Enterprise: "", + Organization: "org", + Repository: "", + IsHosted: false, + }, + }, } for _, test := range tests { t.Run(test.configURL, func(t *testing.T) { - parsedURL, err := url.Parse(test.configURL) + parsedURL, err := url.Parse(strings.Trim(test.configURL, "/")) require.NoError(t, err) test.expected.ConfigURL = 
parsedURL From 0b29421d0471997810dedffc030859f521631608 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Thu, 9 Mar 2023 12:18:53 -0500 Subject: [PATCH 119/561] Update permission ADR based on prototype. (#2383) Co-authored-by: Nikola Jokic --- ...023-02-10-limit-manager-role-permission.md | 52 ++++++++++++++++--- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/adrs/2023-02-10-limit-manager-role-permission.md b/adrs/2023-02-10-limit-manager-role-permission.md index 879f1e07c7..9838b4a4cd 100644 --- a/adrs/2023-02-10-limit-manager-role-permission.md +++ b/adrs/2023-02-10-limit-manager-role-permission.md @@ -66,18 +66,54 @@ To help these customers and improve security for `actions-runner-controller` in - List/Watch on `RoleBindings` - List/Watch on `ServiceAccounts` -> We will change the default cache-based client to bypass cache on reading `Secrets`, so we can eliminate the need for `List` and `Watch` `Secrets` permission in cluster scope. +> We will change the default cache-based client to bypass cache on reading `Secrets` and `ConfigMaps`(ConfigMap is used when you configure `githubServerTLS`), so we can eliminate the need for `List` and `Watch` `Secrets` permission in cluster scope. -Introduce a new `Role` per `AutoScalingRunnerSet` installation and `RoleBinding` the `Role` with the controller's service account in the namespace that each `AutoScalingRunnerSet` deployed with the following permission. +Introduce a new `Role` for the controller and `RoleBinding` the `Role` with the controller's `ServiceAccount` in the namespace the controller is deployed. This role will grant the controller's service account required permission to work with `AutoScalingListeners` in the controller namespace. 
+- Get/Create/Delete on `Pods` +- Get on `Pods/status` - Get/Create/Delete/Update/Patch on `Secrets` -- Get/Create/Delete/Update/Patch on `Pods` +- Get/Create/Delete/Update/Patch on `ServiceAccounts` + +The `Role` and `RoleBinding` creation will happen during the `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller` + +During `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`, we will store the controller's service account info as labels on the controller `Deployment`. +Ex: +```yaml + actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }} + actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }} +``` + +Introduce a new `Role` per `AutoScalingRunnerSet` installation and `RoleBinding` the `Role` with the controller's `ServiceAccount` in the namespace that each `AutoScalingRunnerSet` deployed with the following permission. + +- Get/Create/Delete/Update/Patch/List on `Secrets` +- Create/Delete on `Pods` +- Get on `Pods/status` - Get/Create/Delete/Update/Patch on `Roles` - Get/Create/Delete/Update/Patch on `RoleBindings` -- Get/Create/Delete/Update/Patch on `ServiceAccounts` +- Get on `ConfigMaps` The `Role` and `RoleBinding` creation will happen during `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set` to grant the controller's service account required permissions to operate in the namespace the `AutoScalingRunnerSet` deployed. +The `gha-runner-scale-set` helm chart will try to find the `Deployment` of the controller using `helm lookup`, and get the service account info from the labels of the controller `Deployment` (`actions.github.com/controller-service-account-namespace` and `actions.github.com/controller-service-account-name`). 
+ +The `gha-runner-scale-set` helm chart will use this service account to properly render the `RoleBinding` template. + +The `gha-runner-scale-set` helm chart will also allow customers to explicitly provide the controller service account info, in case the `helm lookup` couldn't locate the right controller `Deployment`. + +New sections in `values.yaml` of `gha-runner-scale-set`: +```yaml +## Optional controller service account that needs to have required Role and RoleBinding +## to operate this gha-runner-scale-set installation. +## The helm chart will try to find the controller deployment and its service account at installation time. +## In case the helm chart can't find the right service account, you can explicitly pass in the following value +## to help it finish RoleBinding with the right service account. +## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly. +controllerServiceAccount: + namespace: arc-system + name: test-arc-gha-runner-scale-set-controller +``` + ## Install ARC to only watch/react resources in a single namespace In case the user doesn't want to have any `ClusterRole`, they can choose to install the `actions-runner-controller` in a mode that only requires a `Role` with `RoleBinding` in a particular namespace. @@ -86,10 +122,12 @@ In this mode, the `actions-runner-controller` will only be able to watch the `Au If you want to deploy multiple `AutoScalingRunnerSet` into different namespaces, you will need to install `actions-runner-controller` in this mode multiple times as well and have each installation watch the namespace you want to deploy an `AutoScalingRunnerSet` -You will install `actions-runner-controller` with something like `helm install arc --set watchSingleNamespace=TestNamespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller` (the `TestNamespace` namespace needs to be created first). 
+You will install `actions-runner-controller` with something like `helm install arc --namespace arc-system --set watchSingleNamespace=test-namespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller` (the `test-namespace` namespace needs to be created first). You will deploy the `AutoScalingRunnerSet` with something like `helm install demo --namespace TestNamespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set` -In this mode, you will end up with a manager `Role` that has all Get/List/Create/Delete/Update/Patch/Watch permissions on resources we need, and a `RoleBinding` to bind the `Role` with the controller `ServiceAccount` in the watched single namespace, ex: `TestNamespace` in the above example. +In this mode, you will end up with a manager `Role` that has all Get/List/Create/Delete/Update/Patch/Watch permissions on resources we need, and a `RoleBinding` to bind the `Role` with the controller `ServiceAccount` in the watched single namespace and the controller namespace, ex: `test-namespace` and `arc-system` in the above example. -The downside of this mode is when you have multiple controllers deployed, they will still use the same version of the CRD. So you will need to make sure every controller you deployed has to be the same version as each other. \ No newline at end of file +The downside of this mode: +- When you have multiple controllers deployed, they will still use the same version of the CRD. So you will need to make sure every controller you deployed has to be the same version as each other. +- You can't mismatch install both `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster. 
\ No newline at end of file From 234ad124173a01e2ecc1e1cae63981fc4fbff6d2 Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Thu, 9 Mar 2023 17:23:32 +0000 Subject: [PATCH 120/561] Add support for self-signed CA certificates (#2268) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Co-authored-by: Nikola Jokic Co-authored-by: Tingluo Huang --- .../v1alpha1/autoscalinglistener_types.go | 3 + .../v1alpha1/autoscalingrunnerset_types.go | 40 +- .../v1alpha1/tls_config_test.go | 105 +++++ .../v1alpha1/zz_generated.deepcopy.go | 34 +- ...tions.github.com_autoscalinglisteners.yaml | 22 ++ ...ions.github.com_autoscalingrunnersets.yaml | 20 +- .../actions.github.com_ephemeralrunners.yaml | 20 +- ...ctions.github.com_ephemeralrunnersets.yaml | 20 +- .../templates/manager_role.yaml | 9 +- .../tests/template_test.go | 2 +- .../templates/_helpers.tpl | 154 +++++++- .../templates/autoscalingrunnerset.yaml | 18 +- .../tests/template_test.go | 359 ++++++++++++++++++ charts/gha-runner-scale-set/values.yaml | 25 +- cmd/githubrunnerscalesetlistener/main.go | 18 +- cmd/githubrunnerscalesetlistener/main_test.go | 52 +++ ...tions.github.com_autoscalinglisteners.yaml | 22 ++ ...ions.github.com_autoscalingrunnersets.yaml | 20 +- .../actions.github.com_ephemeralrunners.yaml | 20 +- ...ctions.github.com_ephemeralrunnersets.yaml | 20 +- .../autoscalinglistener_controller.go | 50 +++ .../autoscalinglistener_controller_test.go | 164 ++++++++ .../autoscalingrunnerset_controller.go | 53 ++- .../autoscalingrunnerset_controller_test.go | 241 ++++++++++++ .../ephemeralrunner_controller.go | 48 ++- .../ephemeralrunner_controller_test.go | 100 +++++ .../ephemeralrunnerset_controller.go | 49 ++- .../ephemeralrunnerset_controller_test.go | 149 ++++++++ .../actions.github.com/resourcebuilder.go | 1 + controllers/actions.github.com/suite_test.go | 8 +- github/actions/actions_server_test.go | 6 - github/actions/client.go | 6 + github/actions/client_tls_test.go | 8 +- 
github/actions/github_api_request_test.go | 21 +- github/actions/identifier_test.go | 47 +++ github/actions/multi_client.go | 19 +- 36 files changed, 1860 insertions(+), 93 deletions(-) create mode 100644 apis/actions.github.com/v1alpha1/tls_config_test.go diff --git a/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go b/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go index e4e5c383d9..8245865787 100644 --- a/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go +++ b/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go @@ -57,6 +57,9 @@ type AutoscalingListenerSpec struct { // +optional Proxy *ProxyConfig `json:"proxy,omitempty"` + + // +optional + GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"` } // AutoscalingListenerStatus defines the observed state of AutoscalingListener diff --git a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go index 8f2bf1024c..adc9a94e0e 100644 --- a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go +++ b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go @@ -17,6 +17,7 @@ limitations under the License. 
package v1alpha1 import ( + "crypto/x509" "fmt" "net/http" "net/url" @@ -80,7 +81,44 @@ type AutoscalingRunnerSetSpec struct { type GitHubServerTLSConfig struct { // Required - RootCAsConfigMapRef string `json:"certConfigMapRef,omitempty"` + CertificateFrom *TLSCertificateSource `json:"certificateFrom,omitempty"` +} + +func (c *GitHubServerTLSConfig) ToCertPool(keyFetcher func(name, key string) ([]byte, error)) (*x509.CertPool, error) { + if c.CertificateFrom == nil { + return nil, fmt.Errorf("certificateFrom not specified") + } + + if c.CertificateFrom.ConfigMapKeyRef == nil { + return nil, fmt.Errorf("configMapKeyRef not specified") + } + + cert, err := keyFetcher(c.CertificateFrom.ConfigMapKeyRef.Name, c.CertificateFrom.ConfigMapKeyRef.Key) + if err != nil { + return nil, fmt.Errorf( + "failed to fetch key %q in configmap %q: %w", + c.CertificateFrom.ConfigMapKeyRef.Key, + c.CertificateFrom.ConfigMapKeyRef.Name, + err, + ) + } + + systemPool, err := x509.SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to get system cert pool: %w", err) + } + + pool := systemPool.Clone() + if !pool.AppendCertsFromPEM(cert) { + return nil, fmt.Errorf("failed to parse certificate") + } + + return pool, nil +} + +type TLSCertificateSource struct { + // Required + ConfigMapKeyRef *corev1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty"` } type ProxyConfig struct { diff --git a/apis/actions.github.com/v1alpha1/tls_config_test.go b/apis/actions.github.com/v1alpha1/tls_config_test.go new file mode 100644 index 0000000000..c3a74bf72a --- /dev/null +++ b/apis/actions.github.com/v1alpha1/tls_config_test.go @@ -0,0 +1,105 @@ +package v1alpha1_test + +import ( + "crypto/tls" + "crypto/x509" + "net/http" + "os" + "path/filepath" + "testing" + + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/github/actions/testserver" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" +) + +func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) { + t.Run("returns an error if CertificateFrom not specified", func(t *testing.T) { + c := &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: nil, + } + + pool, err := c.ToCertPool(nil) + assert.Nil(t, pool) + + require.Error(t, err) + assert.Equal(t, err.Error(), "certificateFrom not specified") + }) + + t.Run("returns an error if CertificateFrom.ConfigMapKeyRef not specified", func(t *testing.T) { + c := &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{}, + } + + pool, err := c.ToCertPool(nil) + assert.Nil(t, pool) + + require.Error(t, err) + assert.Equal(t, err.Error(), "configMapKeyRef not specified") + }) + + t.Run("returns a valid cert pool with correct configuration", func(t *testing.T) { + c := &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &v1.ConfigMapKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "name", + }, + Key: "key", + }, + }, + } + + certsFolder := filepath.Join( + "../../../", + "github", + "actions", + "testdata", + ) + + fetcher := func(name, key string) ([]byte, error) { + cert, err := os.ReadFile(filepath.Join(certsFolder, "rootCA.crt")) + require.NoError(t, err) + + pool := x509.NewCertPool() + ok := pool.AppendCertsFromPEM(cert) + assert.True(t, ok) + + return cert, nil + } + + pool, err := c.ToCertPool(fetcher) + require.NoError(t, err) + require.NotNil(t, pool) + + // can be used to communicate with a server + serverSuccessfullyCalled := false + server := testserver.NewUnstarted(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverSuccessfullyCalled = true + w.WriteHeader(http.StatusOK) + })) + + cert, err := tls.LoadX509KeyPair( + filepath.Join(certsFolder, "server.crt"), + filepath.Join(certsFolder, "server.key"), + ) + require.NoError(t, err) + + server.TLS = 
&tls.Config{Certificates: []tls.Certificate{cert}} + server.StartTLS() + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }, + } + + _, err = client.Get(server.URL) + assert.NoError(t, err) + assert.True(t, serverSuccessfullyCalled) + }) +} diff --git a/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go b/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go index 324707b25e..3246592b48 100644 --- a/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go +++ b/apis/actions.github.com/v1alpha1/zz_generated.deepcopy.go @@ -98,6 +98,11 @@ func (in *AutoscalingListenerSpec) DeepCopyInto(out *AutoscalingListenerSpec) { *out = new(ProxyConfig) (*in).DeepCopyInto(*out) } + if in.GitHubServerTLS != nil { + in, out := &in.GitHubServerTLS, &out.GitHubServerTLS + *out = new(GitHubServerTLSConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingListenerSpec. @@ -195,7 +200,7 @@ func (in *AutoscalingRunnerSetSpec) DeepCopyInto(out *AutoscalingRunnerSetSpec) if in.GitHubServerTLS != nil { in, out := &in.GitHubServerTLS, &out.GitHubServerTLS *out = new(GitHubServerTLSConfig) - **out = **in + (*in).DeepCopyInto(*out) } in.Template.DeepCopyInto(&out.Template) if in.MaxRunners != nil { @@ -395,7 +400,7 @@ func (in *EphemeralRunnerSpec) DeepCopyInto(out *EphemeralRunnerSpec) { if in.GitHubServerTLS != nil { in, out := &in.GitHubServerTLS, &out.GitHubServerTLS *out = new(GitHubServerTLSConfig) - **out = **in + (*in).DeepCopyInto(*out) } in.PodTemplateSpec.DeepCopyInto(&out.PodTemplateSpec) } @@ -435,6 +440,11 @@ func (in *EphemeralRunnerStatus) DeepCopy() *EphemeralRunnerStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GitHubServerTLSConfig) DeepCopyInto(out *GitHubServerTLSConfig) { *out = *in + if in.CertificateFrom != nil { + in, out := &in.CertificateFrom, &out.CertificateFrom + *out = new(TLSCertificateSource) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubServerTLSConfig. @@ -491,3 +501,23 @@ func (in *ProxyServerConfig) DeepCopy() *ProxyServerConfig { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSCertificateSource) DeepCopyInto(out *TLSCertificateSource) { + *out = *in + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(v1.ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCertificateSource. +func (in *TLSCertificateSource) DeepCopy() *TLSCertificateSource { + if in == nil { + return nil + } + out := new(TLSCertificateSource) + in.DeepCopyInto(out) + return out +} diff --git a/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalinglisteners.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalinglisteners.yaml index f0f3f8fb11..6df9c05192 100644 --- a/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalinglisteners.yaml +++ b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalinglisteners.yaml @@ -55,6 +55,28 @@ spec: githubConfigUrl: description: Required type: string + githubServerTLS: + properties: + certificateFrom: + description: Required + properties: + configMapKeyRef: + description: Required + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + type: object + type: object image: description: Required type: string diff --git a/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml index 12c4b5b837..6c4c82cbf3 100644 --- a/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml +++ b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml @@ -51,9 +51,25 @@ spec: type: string githubServerTLS: properties: - certConfigMapRef: + certificateFrom: description: Required - type: string + properties: + configMapKeyRef: + description: Required + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + type: object type: object maxRunners: minimum: 0 diff --git a/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunners.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunners.yaml index 41cdc81b45..b0ce1e4b83 100644 --- a/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunners.yaml +++ b/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunners.yaml @@ -64,9 +64,25 @@ spec: type: string githubServerTLS: properties: - certConfigMapRef: + certificateFrom: description: Required - type: string + properties: + configMapKeyRef: + description: Required + properties: + key: + description: The key to select. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + type: object type: object metadata: description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' diff --git a/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunnersets.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunnersets.yaml index 072cd265fb..86a3f40ce7 100644 --- a/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunnersets.yaml +++ b/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunnersets.yaml @@ -46,9 +46,25 @@ spec: type: string githubServerTLS: properties: - certConfigMapRef: + certificateFrom: description: Required - type: string + properties: + configMapKeyRef: + description: Required + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + type: object type: object metadata: description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' diff --git a/charts/gha-runner-scale-set-controller/templates/manager_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_role.yaml index f51b47c4e2..e9457cfac2 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_role.yaml @@ -146,6 +146,13 @@ rules: - get - list - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch - apiGroups: - rbac.authorization.k8s.io resources: @@ -167,4 +174,4 @@ rules: - get - update - list - - watch \ No newline at end of file + - watch diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index 6f9c47d6e5..00ab04b5a4 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -169,7 +169,7 @@ func TestTemplate_CreateManagerRole(t *testing.T) { assert.Empty(t, managerRole.Namespace, "ClusterRole should not have a namespace") assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRole.Name) - assert.Equal(t, 17, len(managerRole.Rules)) + assert.Equal(t, 18, len(managerRole.Rules)) } func TestTemplate_ManagerRoleBinding(t *testing.T) { diff --git a/charts/gha-runner-scale-set/templates/_helpers.tpl b/charts/gha-runner-scale-set/templates/_helpers.tpl index 944bf97926..433d84ebaf 100644 --- a/charts/gha-runner-scale-set/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set/templates/_helpers.tpl @@ -111,6 +111,15 @@ volumeMounts: emptyDir: {} {{- end }} +{{- define "gha-runner-scale-set.tls-volume" -}} +- name: github-server-tls-cert + configMap: + name: {{ .certificateFrom.configMapKeyRef.name }} + items: + - key: {{ .certificateFrom.configMapKeyRef.key }} + path: {{ .certificateFrom.configMapKeyRef.key }} 
+{{- end }} + {{- define "gha-runner-scale-set.dind-work-volume" -}} {{- $createWorkVolume := 1 }} {{- range $i, $volume := .Values.template.spec.volumes }} @@ -155,12 +164,7 @@ volumeMounts: {{- define "gha-runner-scale-set.non-work-volumes" -}} {{- range $i, $volume := .Values.template.spec.volumes }} {{- if ne $volume.name "work" }} -- name: {{ $volume.name }} - {{- range $key, $val := $volume }} - {{- if ne $key "name" }} - {{ $key }}: {{ $val }} - {{- end }} - {{- end }} +- {{ $volume | toYaml | nindent 2 }} {{- end }} {{- end }} {{- end }} @@ -179,6 +183,7 @@ volumeMounts: {{- end }} {{- define "gha-runner-scale-set.dind-runner-container" -}} +{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} {{- range $i, $container := .Values.template.spec.containers -}} {{- if eq $container.name "runner" -}} {{- range $key, $val := $container }} @@ -190,6 +195,12 @@ volumeMounts: {{- $setDockerTlsVerify := 1 }} {{- $setDockerCertPath := 1 }} {{- $setRunnerWaitDocker := 1 }} + {{- $setNodeExtraCaCerts := 0 }} + {{- $setRunnerUpdateCaCerts := 0 }} + {{- if $tlsConfig.runnerMountPath }} + {{- $setNodeExtraCaCerts = 1 }} + {{- $setRunnerUpdateCaCerts = 1 }} + {{- end }} env: {{- with $container.env }} {{- range $i, $env := . 
}} @@ -205,6 +216,12 @@ env: {{- if eq $env.name "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS" }} {{- $setRunnerWaitDocker = 0 -}} {{- end }} + {{- if eq $env.name "NODE_EXTRA_CA_CERTS" }} + {{- $setNodeExtraCaCerts = 0 -}} + {{- end }} + {{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }} + {{- $setRunnerUpdateCaCerts = 0 -}} + {{- end }} - name: {{ $env.name }} {{- range $envKey, $envVal := $env }} {{- if ne $envKey "name" }} @@ -229,8 +246,20 @@ env: - name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS value: "120" {{- end }} + {{- if $setNodeExtraCaCerts }} + - name: NODE_EXTRA_CA_CERTS + value: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }} + {{- end }} + {{- if $setRunnerUpdateCaCerts }} + - name: RUNNER_UPDATE_CA_CERTS + value: "1" + {{- end }} {{- $mountWork := 1 }} {{- $mountDindCert := 1 }} + {{- $mountGitHubServerTLS := 0 }} + {{- if $tlsConfig.runnerMountPath }} + {{- $mountGitHubServerTLS = 1 }} + {{- end }} volumeMounts: {{- with $container.volumeMounts }} {{- range $i, $volMount := . 
}} @@ -240,6 +269,9 @@ volumeMounts: {{- if eq $volMount.name "dind-cert" }} {{- $mountDindCert = 0 -}} {{- end }} + {{- if eq $volMount.name "github-server-tls-cert" }} + {{- $mountGitHubServerTLS = 0 -}} + {{- end }} - name: {{ $volMount.name }} {{- range $mountKey, $mountVal := $volMount }} {{- if ne $mountKey "name" }} @@ -257,11 +289,17 @@ volumeMounts: mountPath: /certs/client readOnly: true {{- end }} + {{- if $mountGitHubServerTLS }} + - name: github-server-tls-cert + mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }} + subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }} + {{- end }} {{- end }} {{- end }} {{- end }} {{- define "gha-runner-scale-set.kubernetes-mode-runner-container" -}} +{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} {{- range $i, $container := .Values.template.spec.containers -}} {{- if eq $container.name "runner" -}} {{- range $key, $val := $container }} @@ -272,6 +310,12 @@ volumeMounts: {{- $setContainerHooks := 1 }} {{- $setPodName := 1 }} {{- $setRequireJobContainer := 1 }} + {{- $setNodeExtraCaCerts := 0 }} + {{- $setRunnerUpdateCaCerts := 0 }} + {{- if $tlsConfig.runnerMountPath }} + {{- $setNodeExtraCaCerts = 1 }} + {{- $setRunnerUpdateCaCerts = 1 }} + {{- end }} env: {{- with $container.env }} {{- range $i, $env := . 
}} @@ -284,6 +328,12 @@ env: {{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }} {{- $setRequireJobContainer = 0 -}} {{- end }} + {{- if eq $env.name "NODE_EXTRA_CA_CERTS" }} + {{- $setNodeExtraCaCerts = 0 -}} + {{- end }} + {{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }} + {{- $setRunnerUpdateCaCerts = 0 -}} + {{- end }} - name: {{ $env.name }} {{- range $envKey, $envVal := $env }} {{- if ne $envKey "name" }} @@ -306,13 +356,28 @@ env: - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER value: "true" {{- end }} + {{- if $setNodeExtraCaCerts }} + - name: NODE_EXTRA_CA_CERTS + value: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }} + {{- end }} + {{- if $setRunnerUpdateCaCerts }} + - name: RUNNER_UPDATE_CA_CERTS + value: "1" + {{- end }} {{- $mountWork := 1 }} + {{- $mountGitHubServerTLS := 0 }} + {{- if $tlsConfig.runnerMountPath }} + {{- $mountGitHubServerTLS = 1 }} + {{- end }} volumeMounts: {{- with $container.volumeMounts }} {{- range $i, $volMount := . 
}} {{- if eq $volMount.name "work" }} {{- $mountWork = 0 -}} {{- end }} + {{- if eq $volMount.name "github-server-tls-cert" }} + {{- $mountGitHubServerTLS = 0 -}} + {{- end }} - name: {{ $volMount.name }} {{- range $mountKey, $mountVal := $volMount }} {{- if ne $mountKey "name" }} @@ -325,6 +390,81 @@ volumeMounts: - name: work mountPath: /actions-runner/_work {{- end }} + {{- if $mountGitHubServerTLS }} + - name: github-server-tls-cert + mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }} + subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "gha-runner-scale-set.default-mode-runner-containers" -}} +{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} +{{- range $i, $container := .Values.template.spec.containers -}} +{{- if ne $container.name "runner" -}} +- {{ $container | toYaml | nindent 2 }} +{{- else }} +- name: {{ $container.name }} + {{- range $key, $val := $container }} + {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }} + {{ $key }}: {{ $val }} + {{- end }} {{- end }} + {{- $setNodeExtraCaCerts := 0 }} + {{- $setRunnerUpdateCaCerts := 0 }} + {{- if $tlsConfig.runnerMountPath }} + {{- $setNodeExtraCaCerts = 1 }} + {{- $setRunnerUpdateCaCerts = 1 }} + {{- end }} + env: + {{- with $container.env }} + {{- range $i, $env := . 
}} + {{- if eq $env.name "NODE_EXTRA_CA_CERTS" }} + {{- $setNodeExtraCaCerts = 0 -}} + {{- end }} + {{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }} + {{- $setRunnerUpdateCaCerts = 0 -}} + {{- end }} + - name: {{ $env.name }} + {{- range $envKey, $envVal := $env }} + {{- if ne $envKey "name" }} + {{ $envKey }}: {{ $envVal | toYaml | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $setNodeExtraCaCerts }} + - name: NODE_EXTRA_CA_CERTS + value: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }} + {{- end }} + {{- if $setRunnerUpdateCaCerts }} + - name: RUNNER_UPDATE_CA_CERTS + value: "1" + {{- end }} + {{- $mountGitHubServerTLS := 0 }} + {{- if $tlsConfig.runnerMountPath }} + {{- $mountGitHubServerTLS = 1 }} + {{- end }} + volumeMounts: + {{- with $container.volumeMounts }} + {{- range $i, $volMount := . }} + {{- if eq $volMount.name "github-server-tls-cert" }} + {{- $mountGitHubServerTLS = 0 -}} + {{- end }} + - name: {{ $volMount.name }} + {{- range $mountKey, $mountVal := $volMount }} + {{- if ne $mountKey "name" }} + {{ $mountKey }}: {{ $mountVal | toYaml | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $mountGitHubServerTLS }} + - name: github-server-tls-cert + mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }} + subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }} + {{- end }} +{{- end }} +{{- end }} {{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index e7f66156ed..6ec90340dd 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -21,6 +21,16 @@ spec: runnerScaleSetName: {{ . 
}} {{- end }} + {{- if .Values.githubServerTLS }} + githubServerTLS: + {{- with .Values.githubServerTLS.certificateFrom }} + certificateFrom: + configMapKeyRef: + name: {{ .configMapKeyRef.name }} + key: {{ .configMapKeyRef.key }} + {{- end }} + {{- end }} + {{- if .Values.proxy }} proxy: {{- if .Values.proxy.http }} @@ -103,10 +113,14 @@ spec: {{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }} {{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }} {{- else }} - {{ .Values.template.spec.containers | toYaml | nindent 6 }} + {{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }} {{- end }} - {{- if or .Values.template.spec.volumes (eq .Values.containerMode.type "dind") (eq .Values.containerMode.type "kubernetes") }} + {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} + {{- if or .Values.template.spec.volumes (eq .Values.containerMode.type "dind") (eq .Values.containerMode.type "kubernetes") $tlsConfig.runnerMountPath }} volumes: + {{- if $tlsConfig.runnerMountPath }} + {{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }} + {{- end }} {{- if eq .Values.containerMode.type "dind" }} {{- include "gha-runner-scale-set.dind-volume" . | nindent 6 }} {{- include "gha-runner-scale-set.dind-work-volume" . 
| nindent 6 }} diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index 01b522c6dc..3493deb6ba 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -828,6 +828,365 @@ func TestTemplateRenderedWithProxy(t *testing.T) { assert.Contains(t, ars.Spec.Proxy.NoProxy, "example.org") } +func TestTemplateRenderedWithTLS(t *testing.T) { + t.Parallel() + + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + render := func(t *testing.T, options *helm.Options) v1alpha1.AutoscalingRunnerSet { + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + releaseName := "test-runners" + + output := helm.RenderTemplate( + t, + options, + helmChartPath, + releaseName, + []string{"templates/autoscalingrunnerset.yaml"}, + ) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + return ars + } + + t.Run("providing githubServerTLS.runnerMountPath", func(t *testing.T) { + t.Run("mode: default", func(t *testing.T) { + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secrets", + "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", + "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", + "githubServerTLS.runnerMountPath": "/runner/mount/path", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + ars := render(t, options) + + require.NotNil(t, ars.Spec.GitHubServerTLS) + expected := &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "certs-configmap", + }, + Key: "cert.pem", + }, + }, + } + assert.Equal(t, expected, ars.Spec.GitHubServerTLS) + 
+ var volume *corev1.Volume + for _, v := range ars.Spec.Template.Spec.Volumes { + if v.Name == "github-server-tls-cert" { + volume = &v + break + } + } + require.NotNil(t, volume) + assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name) + assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key) + assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path) + + assert.Contains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: "github-server-tls-cert", + MountPath: "/runner/mount/path/cert.pem", + SubPath: "cert.pem", + }) + + assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "NODE_EXTRA_CA_CERTS", + Value: "/runner/mount/path/cert.pem", + }) + + assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "RUNNER_UPDATE_CA_CERTS", + Value: "1", + }) + }) + + t.Run("mode: dind", func(t *testing.T) { + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secrets", + "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", + "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", + "githubServerTLS.runnerMountPath": "/runner/mount/path/", + "containerMode.type": "dind", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + ars := render(t, options) + + require.NotNil(t, ars.Spec.GitHubServerTLS) + expected := &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "certs-configmap", + }, + Key: "cert.pem", + }, + }, + } + assert.Equal(t, expected, ars.Spec.GitHubServerTLS) + + var volume *corev1.Volume + for _, v := range ars.Spec.Template.Spec.Volumes { + if v.Name == "github-server-tls-cert" { + volume = &v + break + } + } + require.NotNil(t, volume) + assert.Equal(t, 
"certs-configmap", volume.ConfigMap.LocalObjectReference.Name) + assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key) + assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path) + + assert.Contains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: "github-server-tls-cert", + MountPath: "/runner/mount/path/cert.pem", + SubPath: "cert.pem", + }) + + assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "NODE_EXTRA_CA_CERTS", + Value: "/runner/mount/path/cert.pem", + }) + + assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "RUNNER_UPDATE_CA_CERTS", + Value: "1", + }) + }) + + t.Run("mode: kubernetes", func(t *testing.T) { + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secrets", + "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", + "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", + "githubServerTLS.runnerMountPath": "/runner/mount/path", + "containerMode.type": "kubernetes", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + ars := render(t, options) + + require.NotNil(t, ars.Spec.GitHubServerTLS) + expected := &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "certs-configmap", + }, + Key: "cert.pem", + }, + }, + } + assert.Equal(t, expected, ars.Spec.GitHubServerTLS) + + var volume *corev1.Volume + for _, v := range ars.Spec.Template.Spec.Volumes { + if v.Name == "github-server-tls-cert" { + volume = &v + break + } + } + require.NotNil(t, volume) + assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name) + assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key) + assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path) + + 
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: "github-server-tls-cert", + MountPath: "/runner/mount/path/cert.pem", + SubPath: "cert.pem", + }) + + assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "NODE_EXTRA_CA_CERTS", + Value: "/runner/mount/path/cert.pem", + }) + + assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "RUNNER_UPDATE_CA_CERTS", + Value: "1", + }) + }) + }) + + t.Run("without providing githubServerTLS.runnerMountPath", func(t *testing.T) { + t.Run("mode: default", func(t *testing.T) { + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secrets", + "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", + "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + ars := render(t, options) + + require.NotNil(t, ars.Spec.GitHubServerTLS) + expected := &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "certs-configmap", + }, + Key: "cert.pem", + }, + }, + } + assert.Equal(t, expected, ars.Spec.GitHubServerTLS) + + var volume *corev1.Volume + for _, v := range ars.Spec.Template.Spec.Volumes { + if v.Name == "github-server-tls-cert" { + volume = &v + break + } + } + assert.Nil(t, volume) + + assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: "github-server-tls-cert", + MountPath: "/runner/mount/path/cert.pem", + SubPath: "cert.pem", + }) + + assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "NODE_EXTRA_CA_CERTS", + Value: "/runner/mount/path/cert.pem", + }) + + assert.NotContains(t, 
ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "RUNNER_UPDATE_CA_CERTS", + Value: "1", + }) + }) + + t.Run("mode: dind", func(t *testing.T) { + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secrets", + "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", + "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", + "containerMode.type": "dind", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + ars := render(t, options) + + require.NotNil(t, ars.Spec.GitHubServerTLS) + expected := &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "certs-configmap", + }, + Key: "cert.pem", + }, + }, + } + assert.Equal(t, expected, ars.Spec.GitHubServerTLS) + + var volume *corev1.Volume + for _, v := range ars.Spec.Template.Spec.Volumes { + if v.Name == "github-server-tls-cert" { + volume = &v + break + } + } + assert.Nil(t, volume) + + assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: "github-server-tls-cert", + MountPath: "/runner/mount/path/cert.pem", + SubPath: "cert.pem", + }) + + assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "NODE_EXTRA_CA_CERTS", + Value: "/runner/mount/path/cert.pem", + }) + + assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "RUNNER_UPDATE_CA_CERTS", + Value: "1", + }) + }) + + t.Run("mode: kubernetes", func(t *testing.T) { + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secrets", + "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", + "githubServerTLS.certificateFrom.configMapKeyRef.key": 
"cert.pem", + "containerMode.type": "kubernetes", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + ars := render(t, options) + + require.NotNil(t, ars.Spec.GitHubServerTLS) + expected := &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "certs-configmap", + }, + Key: "cert.pem", + }, + }, + } + assert.Equal(t, expected, ars.Spec.GitHubServerTLS) + + var volume *corev1.Volume + for _, v := range ars.Spec.Template.Spec.Volumes { + if v.Name == "github-server-tls-cert" { + volume = &v + break + } + } + assert.Nil(t, volume) + + assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: "github-server-tls-cert", + MountPath: "/runner/mount/path/cert.pem", + SubPath: "cert.pem", + }) + + assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "NODE_EXTRA_CA_CERTS", + Value: "/runner/mount/path/cert.pem", + }) + + assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "RUNNER_UPDATE_CA_CERTS", + Value: "1", + }) + }) + }) +} + func TestTemplateNamingConstraints(t *testing.T) { t.Parallel() diff --git a/charts/gha-runner-scale-set/values.yaml b/charts/gha-runner-scale-set/values.yaml index 8845daa8a4..94ea1d7dcd 100644 --- a/charts/gha-runner-scale-set/values.yaml +++ b/charts/gha-runner-scale-set/values.yaml @@ -4,7 +4,7 @@ githubConfigUrl: "" ## githubConfigSecret is the k8s secrets to use when auth with GitHub API. ## You can choose to use GitHub App or a PAT token -githubConfigSecret: +githubConfigSecret: ### GitHub Apps Configuration ## NOTE: IDs MUST be strings, use quotes #github_app_id: "" @@ -47,6 +47,27 @@ githubConfigSecret: ## name of the runner scale set to create. 
Defaults to the helm release name # runnerScaleSetName: "" +## A self-signed CA certificate for communication with the GitHub server can be +## provided using a config map key selector. If `runnerMountPath` is set, for +## each runner pod ARC will: +## - create a `github-server-tls-cert` volume containing the certificate +## specified in `certificateFrom` +## - mount that volume on path `runnerMountPath`/{certificate name} +## - set NODE_EXTRA_CA_CERTS environment variable to that same path +## - set RUNNER_UPDATE_CA_CERTS environment variable to "1" (as of version +## 2.303.0 this will instruct the runner to reload certificates on the host) +## +## If any of the above had already been set by the user in the runner pod +## template, ARC will observe those and not overwrite them. +## Example configuration: +# +# githubServerTLS: +# certificateFrom: +# configMapKeyRef: +# name: config-map-name +# key: ca.pem +# runnerMountPath: /usr/local/share/ca-certificates/ + ## template is the PodSpec for each runner Pod template: spec: @@ -139,4 +160,4 @@ containerMode: storageClassName: "dynamic-blob-storage" resources: requests: - storage: 1Gi \ No newline at end of file + storage: 1Gi diff --git a/cmd/githubrunnerscalesetlistener/main.go b/cmd/githubrunnerscalesetlistener/main.go index 3813617a79..64abf6cfad 100644 --- a/cmd/githubrunnerscalesetlistener/main.go +++ b/cmd/githubrunnerscalesetlistener/main.go @@ -18,6 +18,7 @@ package main import ( "context" + "crypto/x509" "fmt" "net/http" "net/url" @@ -44,6 +45,7 @@ type RunnerScaleSetListenerConfig struct { MaxRunners int `split_words:"true"` MinRunners int `split_words:"true"` RunnerScaleSetId int `split_words:"true"` + ServerRootCA string `split_words:"true"` } func main() { @@ -90,8 +92,8 @@ func run(rc RunnerScaleSetListenerConfig, logger logr.Logger) error { actionsServiceClient, err := newActionsClientFromConfig( rc, creds, - actions.WithUserAgent(fmt.Sprintf("actions-runner-controller/%s", build.Version)), 
actions.WithLogger(logger), + actions.WithUserAgent(fmt.Sprintf("actions-runner-controller/%s", build.Version)), ) if err != nil { return fmt.Errorf("failed to create an Actions Service client: %w", err) @@ -160,6 +162,20 @@ func validateConfig(config *RunnerScaleSetListenerConfig) error { } func newActionsClientFromConfig(config RunnerScaleSetListenerConfig, creds *actions.ActionsAuth, options ...actions.ClientOption) (*actions.Client, error) { + if config.ServerRootCA != "" { + systemPool, err := x509.SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to load system cert pool: %w", err) + } + pool := systemPool.Clone() + ok := pool.AppendCertsFromPEM([]byte(config.ServerRootCA)) + if !ok { + return nil, fmt.Errorf("failed to parse root certificate") + } + + options = append(options, actions.WithRootCAs(pool)) + } + proxyFunc := httpproxy.FromEnvironment().ProxyFunc() options = append(options, actions.WithProxy(func(req *http.Request) (*url.URL, error) { return proxyFunc(req.URL) diff --git a/cmd/githubrunnerscalesetlistener/main_test.go b/cmd/githubrunnerscalesetlistener/main_test.go index 619bfeb69f..e4c1df0320 100644 --- a/cmd/githubrunnerscalesetlistener/main_test.go +++ b/cmd/githubrunnerscalesetlistener/main_test.go @@ -1,16 +1,20 @@ package main import ( + "context" + "crypto/tls" "fmt" "net/http" "net/http/httptest" "os" + "path/filepath" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/actions/actions-runner-controller/github/actions" + "github.com/actions/actions-runner-controller/github/actions/testserver" ) func TestConfigValidationMinMax(t *testing.T) { @@ -97,6 +101,54 @@ func TestConfigValidationConfigUrl(t *testing.T) { assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl") } +func TestCustomerServerRootCA(t *testing.T) { + ctx := context.Background() + certsFolder := filepath.Join( + "../../", + "github", + "actions", + 
"testdata", + ) + certPath := filepath.Join(certsFolder, "server.crt") + keyPath := filepath.Join(certsFolder, "server.key") + + serverCalledSuccessfully := false + + server := testserver.NewUnstarted(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverCalledSuccessfully = true + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"count": 0}`)) + })) + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + require.NoError(t, err) + + server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} + server.StartTLS() + + var certsString string + rootCA, err := os.ReadFile(filepath.Join(certsFolder, "rootCA.crt")) + require.NoError(t, err) + certsString = string(rootCA) + + intermediate, err := os.ReadFile(filepath.Join(certsFolder, "intermediate.pem")) + require.NoError(t, err) + certsString = certsString + string(intermediate) + + config := RunnerScaleSetListenerConfig{ + ConfigureUrl: server.ConfigURLForOrg("myorg"), + ServerRootCA: certsString, + } + creds := &actions.ActionsAuth{ + Token: "token", + } + + client, err := newActionsClientFromConfig(config, creds) + require.NoError(t, err) + _, err = client.GetRunnerScaleSet(ctx, "test") + require.NoError(t, err) + assert.True(t, serverCalledSuccessfully) +} + func TestProxySettings(t *testing.T) { t.Run("http", func(t *testing.T) { wentThroughProxy := false diff --git a/config/crd/bases/actions.github.com_autoscalinglisteners.yaml b/config/crd/bases/actions.github.com_autoscalinglisteners.yaml index f0f3f8fb11..6df9c05192 100644 --- a/config/crd/bases/actions.github.com_autoscalinglisteners.yaml +++ b/config/crd/bases/actions.github.com_autoscalinglisteners.yaml @@ -55,6 +55,28 @@ spec: githubConfigUrl: description: Required type: string + githubServerTLS: + properties: + certificateFrom: + description: Required + properties: + configMapKeyRef: + description: Required + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + type: object + type: object image: description: Required type: string diff --git a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml index 12c4b5b837..6c4c82cbf3 100644 --- a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml +++ b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml @@ -51,9 +51,25 @@ spec: type: string githubServerTLS: properties: - certConfigMapRef: + certificateFrom: description: Required - type: string + properties: + configMapKeyRef: + description: Required + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + type: object type: object maxRunners: minimum: 0 diff --git a/config/crd/bases/actions.github.com_ephemeralrunners.yaml b/config/crd/bases/actions.github.com_ephemeralrunners.yaml index 41cdc81b45..b0ce1e4b83 100644 --- a/config/crd/bases/actions.github.com_ephemeralrunners.yaml +++ b/config/crd/bases/actions.github.com_ephemeralrunners.yaml @@ -64,9 +64,25 @@ spec: type: string githubServerTLS: properties: - certConfigMapRef: + certificateFrom: description: Required - type: string + properties: + configMapKeyRef: + description: Required + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + type: object type: object metadata: description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' diff --git a/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml b/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml index 072cd265fb..86a3f40ce7 100644 --- a/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml +++ b/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml @@ -46,9 +46,25 @@ spec: type: string githubServerTLS: properties: - certConfigMapRef: + certificateFrom: description: Required - type: string + properties: + configMapKeyRef: + description: Required + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + type: object type: object metadata: description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' diff --git a/controllers/actions.github.com/autoscalinglistener_controller.go b/controllers/actions.github.com/autoscalinglistener_controller.go index 5ad3f0c3df..e82fc8da1f 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller.go +++ b/controllers/actions.github.com/autoscalinglistener_controller.go @@ -423,6 +423,15 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a } } + if autoscalingListener.Spec.GitHubServerTLS != nil { + env, err := r.certificateEnvVarForListener(ctx, autoscalingRunnerSet, autoscalingListener) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create certificate env var for listener: %v", err) + } + + envs = append(envs, env) + } + newPod := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret, envs...) if err := ctrl.SetControllerReference(autoscalingListener, newPod, r.Scheme); err != nil { @@ -439,6 +448,47 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a return ctrl.Result{}, nil } +func (r *AutoscalingListenerReconciler) certificateEnvVarForListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener) (corev1.EnvVar, error) { + if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom == nil { + return corev1.EnvVar{}, fmt.Errorf("githubServerTLS.certificateFrom is not specified") + } + + if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef == nil { + return corev1.EnvVar{}, fmt.Errorf("githubServerTLS.certificateFrom.configMapKeyRef is not specified") + } + + var configmap corev1.ConfigMap + err := r.Get( + ctx, + types.NamespacedName{ + Namespace: autoscalingRunnerSet.Namespace, + Name: autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Name, + }, + &configmap, + ) + if err != nil { 
+ return corev1.EnvVar{}, fmt.Errorf( + "failed to get configmap %s: %w", + autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Name, + err, + ) + } + + certificate, ok := configmap.Data[autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Key] + if !ok { + return corev1.EnvVar{}, fmt.Errorf( + "key %s is not found in configmap %s", + autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Key, + autoscalingListener.Spec.GitHubServerTLS.CertificateFrom.ConfigMapKeyRef.Name, + ) + } + + return corev1.EnvVar{ + Name: "GITHUB_SERVER_ROOT_CA", + Value: certificate, + }, nil +} + func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) { newListenerSecret := r.resourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret) diff --git a/controllers/actions.github.com/autoscalinglistener_controller_test.go b/controllers/actions.github.com/autoscalinglistener_controller_test.go index 487d9eb602..d493279761 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller_test.go +++ b/controllers/actions.github.com/autoscalinglistener_controller_test.go @@ -3,6 +3,8 @@ package actionsgithubcom import ( "context" "fmt" + "os" + "path/filepath" "time" corev1 "k8s.io/api/core/v1" @@ -554,3 +556,165 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() { autoscalingListenerTestInterval).Should(Succeed(), "failed to delete secret with proxy details") }) }) + +var _ = Describe("Test GitHub Server TLS configuration", func() { + var ctx context.Context + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet + var configSecret *corev1.Secret + var autoscalingListener *actionsv1alpha1.AutoscalingListener + var rootCAConfigMap *corev1.ConfigMap + + BeforeEach(func() { + 
ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) + + cert, err := os.ReadFile(filepath.Join( + "../../", + "github", + "actions", + "testdata", + "rootCA.crt", + )) + Expect(err).NotTo(HaveOccurred(), "failed to read root CA cert") + rootCAConfigMap = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "root-ca-configmap", + Namespace: autoscalingNS.Name, + }, + Data: map[string]string{ + "rootCA.crt": string(cert), + }, + } + err = k8sClient.Create(ctx, rootCAConfigMap) + Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs") + + controller := &AutoscalingListenerReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + } + err = controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + min := 1 + max := 10 + autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &actionsv1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: rootCAConfigMap.Name, + }, + Key: "rootCA.crt", + }, + }, + }, + MaxRunners: &max, + MinRunners: &min, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err = k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + autoscalingListener = &actionsv1alpha1.AutoscalingListener{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asl", + 
Namespace: autoscalingNS.Name, + }, + Spec: actionsv1alpha1.AutoscalingListenerSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &actionsv1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: rootCAConfigMap.Name, + }, + Key: "rootCA.crt", + }, + }, + }, + RunnerScaleSetId: 1, + AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace, + AutoscalingRunnerSetName: autoscalingRunnerSet.Name, + EphemeralRunnerSetName: "test-ers", + MaxRunners: 10, + MinRunners: 1, + Image: "ghcr.io/owner/repo", + }, + } + + err = k8sClient.Create(ctx, autoscalingListener) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingListener") + + startManagers(GinkgoT(), mgr) + }) + + Context("When creating a new AutoScalingListener", func() { + It("It should set the certificates as an environment variable on the pod", func() { + pod := new(corev1.Pod) + Eventually( + func(g Gomega) { + err := k8sClient.Get( + ctx, + client.ObjectKey{ + Name: autoscalingListener.Name, + Namespace: autoscalingListener.Namespace, + }, + pod, + ) + + g.Expect(err).NotTo(HaveOccurred(), "failed to get pod") + g.Expect(pod.Spec.Containers).NotTo(BeEmpty(), "pod should have containers") + g.Expect(pod.Spec.Containers[0].Env).NotTo(BeEmpty(), "pod should have env variables") + + var env *corev1.EnvVar + for _, e := range pod.Spec.Containers[0].Env { + if e.Name == "GITHUB_SERVER_ROOT_CA" { + env = &e + break + } + } + g.Expect(env).NotTo(BeNil(), "pod should have an env variable named GITHUB_SERVER_ROOT_CA_PATH") + + cert, err := os.ReadFile(filepath.Join( + "../../", + "github", + "actions", + "testdata", + "rootCA.crt", + )) + g.Expect(err).NotTo(HaveOccurred(), "failed to read rootCA.crt") + + g.Expect(env.Value).To( + BeEquivalentTo(string(cert)), + "GITHUB_SERVER_ROOT_CA should be the 
rootCA.crt", + ) + }). + WithTimeout(autoscalingRunnerSetTestTimeout). + WithPolling(autoscalingListenerTestInterval). + Should(Succeed(), "failed to create pod with volume and env variable") + }) + }) +}) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 1c77acd904..c7e95201e5 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -541,7 +541,23 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, a return nil, fmt.Errorf("failed to find GitHub config secret: %w", err) } - var opts []actions.ClientOption + opts, err := r.actionsClientOptionsFor(ctx, autoscalingRunnerSet) + if err != nil { + return nil, fmt.Errorf("failed to get actions client options: %w", err) + } + + return r.ActionsClient.GetClientFromSecret( + ctx, + autoscalingRunnerSet.Spec.GitHubConfigUrl, + autoscalingRunnerSet.Namespace, + configSecret.Data, + opts..., + ) +} + +func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) ([]actions.ClientOption, error) { + var options []actions.ClientOption + if autoscalingRunnerSet.Spec.Proxy != nil { proxyFunc, err := autoscalingRunnerSet.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) { var secret corev1.Secret @@ -556,16 +572,35 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, a return nil, fmt.Errorf("failed to get proxy func: %w", err) } - opts = append(opts, actions.WithProxy(proxyFunc)) + options = append(options, actions.WithProxy(proxyFunc)) } - return r.ActionsClient.GetClientFromSecret( - ctx, - autoscalingRunnerSet.Spec.GitHubConfigUrl, - autoscalingRunnerSet.Namespace, - configSecret.Data, - opts..., - ) + tlsConfig := autoscalingRunnerSet.Spec.GitHubServerTLS + if tlsConfig != nil { + pool, 
err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) { + var configmap corev1.ConfigMap + err := r.Get( + ctx, + types.NamespacedName{ + Namespace: autoscalingRunnerSet.Namespace, + Name: name, + }, + &configmap, + ) + if err != nil { + return nil, fmt.Errorf("failed to get configmap %s: %w", name, err) + } + + return []byte(configmap.Data[key]), nil + }) + if err != nil { + return nil, fmt.Errorf("failed to get tls config: %w", err) + } + + options = append(options, actions.WithRootCAs(pool)) + } + + return options, nil } // SetupWithManager sets up the controller with the Manager. diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index e999fc7e6c..aa2ae57d30 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -2,10 +2,13 @@ package actionsgithubcom import ( "context" + "crypto/tls" "encoding/base64" "fmt" "net/http" "net/http/httptest" + "os" + "path/filepath" "strings" "time" @@ -787,4 +790,242 @@ var _ = Describe("Test Client optional configuration", func() { ).Should(BeTrue(), "server was not called") }) }) + + Context("When specifying a configmap for root CAs", func() { + var ctx context.Context + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var configSecret *corev1.Secret + var rootCAConfigMap *corev1.ConfigMap + var controller *AutoscalingRunnerSetReconciler + + BeforeEach(func() { + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) + + cert, err := os.ReadFile(filepath.Join( + "../../", + "github", + "actions", + "testdata", + "rootCA.crt", + )) + Expect(err).NotTo(HaveOccurred(), "failed to read root CA cert") + rootCAConfigMap = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"root-ca-configmap", + Namespace: autoscalingNS.Name, + }, + Data: map[string]string{ + "rootCA.crt": string(cert), + }, + } + err = k8sClient.Create(ctx, rootCAConfigMap) + Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs") + + controller = &AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ControllerNamespace: autoscalingNS.Name, + DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", + ActionsClient: fake.NewMultiClient(), + } + err = controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + startManagers(GinkgoT(), mgr) + }) + + It("should be able to make requests to a server using root CAs", func() { + controller.ActionsClient = actions.NewMultiClient("test", logr.Discard()) + + certsFolder := filepath.Join( + "../../", + "github", + "actions", + "testdata", + ) + certPath := filepath.Join(certsFolder, "server.crt") + keyPath := filepath.Join(certsFolder, "server.key") + + serverSuccessfullyCalled := false + server := testserver.NewUnstarted(GinkgoT(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverSuccessfullyCalled = true + w.WriteHeader(http.StatusOK) + })) + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + Expect(err).NotTo(HaveOccurred(), "failed to load server cert") + + server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} + server.StartTLS() + + min := 1 + max := 10 + autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: server.ConfigURLForOrg("my-org"), + GitHubConfigSecret: configSecret.Name, + GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: rootCAConfigMap.Name, + }, 
+ Key: "rootCA.crt", + }, + }, + }, + MaxRunners: &max, + MinRunners: &min, + RunnerGroup: "testgroup", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err = k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + // wait for server to be called + Eventually( + func() (bool, error) { + return serverSuccessfullyCalled, nil + }, + autoscalingRunnerSetTestTimeout, + 1*time.Nanosecond, + ).Should(BeTrue(), "server was not called") + }) + + It("it creates a listener referencing the right configmap for TLS", func() { + min := 1 + max := 10 + autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: rootCAConfigMap.Name, + }, + Key: "rootCA.crt", + }, + }, + }, + MaxRunners: &max, + MinRunners: &min, + RunnerGroup: "testgroup", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err := k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + Eventually( + func(g Gomega) { + listener := new(v1alpha1.AutoscalingListener) + err := k8sClient.Get( + ctx, + client.ObjectKey{ + Name: scaleSetListenerName(autoscalingRunnerSet), + Namespace: autoscalingRunnerSet.Namespace, + }, + listener, + ) + g.Expect(err).NotTo(HaveOccurred(), "failed to get 
listener") + + g.Expect(listener.Spec.GitHubServerTLS).NotTo(BeNil(), "listener does not have TLS config") + g.Expect(listener.Spec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "listener does not have TLS config") + }, + autoscalingRunnerSetTestTimeout, + autoscalingListenerTestInterval, + ).Should(Succeed(), "tls config is incorrect") + }) + + It("it creates an ephemeral runner set referencing the right configmap for TLS", func() { + min := 1 + max := 10 + autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: rootCAConfigMap.Name, + }, + Key: "rootCA.crt", + }, + }, + }, + MaxRunners: &max, + MinRunners: &min, + RunnerGroup: "testgroup", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + err := k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + Eventually( + func(g Gomega) { + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) + g.Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") + g.Expect(runnerSetList.Items).To(HaveLen(1), "expected 1 EphemeralRunnerSet to be created") + + runnerSet := &runnerSetList.Items[0] + + g.Expect(runnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS).NotTo(BeNil(), "expected EphemeralRunnerSpec.GitHubServerTLS to be set") + 
g.Expect(runnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "EphemeralRunnerSpec does not have TLS config") + }, + autoscalingRunnerSetTestTimeout, + autoscalingListenerTestInterval, + ).Should(Succeed()) + }) + }) }) diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go index 516526a3a0..f697091b55 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller.go +++ b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -680,6 +680,21 @@ func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner return nil, fmt.Errorf("failed to get secret: %w", err) } + opts, err := r.actionsClientOptionsFor(ctx, runner) + if err != nil { + return nil, fmt.Errorf("failed to get actions client options: %w", err) + } + + return r.ActionsClient.GetClientFromSecret( + ctx, + runner.Spec.GitHubConfigUrl, + runner.Namespace, + secret.Data, + opts..., + ) +} + +func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) ([]actions.ClientOption, error) { var opts []actions.ClientOption if runner.Spec.Proxy != nil { proxyFunc, err := runner.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) { @@ -698,13 +713,32 @@ func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner opts = append(opts, actions.WithProxy(proxyFunc)) } - return r.ActionsClient.GetClientFromSecret( - ctx, - runner.Spec.GitHubConfigUrl, - runner.Namespace, - secret.Data, - opts..., - ) + tlsConfig := runner.Spec.GitHubServerTLS + if tlsConfig != nil { + pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) { + var configmap corev1.ConfigMap + err := r.Get( + ctx, + types.NamespacedName{ + Namespace: runner.Namespace, + Name: name, + }, + &configmap, + ) + if err != nil { + return nil, fmt.Errorf("failed to get configmap %s: %w", 
name, err) + } + + return []byte(configmap.Data[key]), nil + }) + if err != nil { + return nil, fmt.Errorf("failed to get tls config: %w", err) + } + + opts = append(opts, actions.WithRootCAs(pool)) + } + + return opts, nil } // runnerRegisteredWithService checks if the runner is still registered with the service diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go index b5e064b1aa..03086099bd 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go @@ -2,10 +2,13 @@ package actionsgithubcom import ( "context" + "crypto/tls" "encoding/base64" "fmt" "net/http" "net/http/httptest" + "os" + "path/filepath" "strings" "time" @@ -14,6 +17,7 @@ import ( "github.com/go-logr/logr" "github.com/actions/actions-runner-controller/github/actions/fake" + "github.com/actions/actions-runner-controller/github/actions/testserver" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -841,4 +845,100 @@ var _ = Describe("EphemeralRunner", func() { })) }) }) + + Describe("TLS config", func() { + var ctx context.Context + var mgr ctrl.Manager + var autoScalingNS *corev1.Namespace + var configSecret *corev1.Secret + var controller *EphemeralRunnerReconciler + var rootCAConfigMap *corev1.ConfigMap + + BeforeEach(func() { + ctx = context.Background() + autoScalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoScalingNS.Name) + + cert, err := os.ReadFile(filepath.Join( + "../../", + "github", + "actions", + "testdata", + "rootCA.crt", + )) + Expect(err).NotTo(HaveOccurred(), "failed to read root CA cert") + rootCAConfigMap = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "root-ca-configmap", + Namespace: autoScalingNS.Name, + }, + Data: map[string]string{ + "rootCA.crt": string(cert), + }, + } + err = k8sClient.Create(ctx, rootCAConfigMap) + Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs") + + controller = &EphemeralRunnerReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ActionsClient: fake.NewMultiClient(), + } + + err = controller.SetupWithManager(mgr) + Expect(err).To(BeNil(), "failed to setup controller") + + startManagers(GinkgoT(), mgr) + }) + + It("should be able to make requests to a server using root CAs", func() { + certsFolder := filepath.Join( + "../../", + "github", + "actions", + "testdata", + ) + certPath := filepath.Join(certsFolder, "server.crt") + keyPath := filepath.Join(certsFolder, "server.key") + + serverSuccessfullyCalled := false + server := testserver.NewUnstarted(GinkgoT(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverSuccessfullyCalled = true + w.WriteHeader(http.StatusOK) + })) + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + Expect(err).NotTo(HaveOccurred(), "failed to load server cert") + + 
server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} + server.StartTLS() + + // Use an actual client + controller.ActionsClient = actions.NewMultiClient("test", logr.Discard()) + + ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name) + ephemeralRunner.Spec.GitHubConfigUrl = server.ConfigURLForOrg("my-org") + ephemeralRunner.Spec.GitHubServerTLS = &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: rootCAConfigMap.Name, + }, + Key: "rootCA.crt", + }, + }, + } + + err = k8sClient.Create(ctx, ephemeralRunner) + Expect(err).To(BeNil(), "failed to create ephemeral runner") + + Eventually( + func() bool { + return serverSuccessfullyCalled + }, + 2*time.Second, + interval, + ).Should(BeTrue(), "failed to contact server") + }) + }) }) diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index 29f40e0da0..0db5840976 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -450,6 +450,22 @@ func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil { return nil, fmt.Errorf("failed to get secret: %w", err) } + + opts, err := r.actionsClientOptionsFor(ctx, rs) + if err != nil { + return nil, fmt.Errorf("failed to get actions client options: %w", err) + } + + return r.ActionsClient.GetClientFromSecret( + ctx, + rs.Spec.EphemeralRunnerSpec.GitHubConfigUrl, + rs.Namespace, + secret.Data, + opts..., + ) +} + +func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) ([]actions.ClientOption, error) 
{ var opts []actions.ClientOption if rs.Spec.EphemeralRunnerSpec.Proxy != nil { proxyFunc, err := rs.Spec.EphemeralRunnerSpec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) { @@ -468,13 +484,32 @@ func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs opts = append(opts, actions.WithProxy(proxyFunc)) } - return r.ActionsClient.GetClientFromSecret( - ctx, - rs.Spec.EphemeralRunnerSpec.GitHubConfigUrl, - rs.Namespace, - secret.Data, - opts..., - ) + tlsConfig := rs.Spec.EphemeralRunnerSpec.GitHubServerTLS + if tlsConfig != nil { + pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) { + var configmap corev1.ConfigMap + err := r.Get( + ctx, + types.NamespacedName{ + Namespace: rs.Namespace, + Name: name, + }, + &configmap, + ) + if err != nil { + return nil, fmt.Errorf("failed to get configmap %s: %w", name, err) + } + + return []byte(configmap.Data[key]), nil + }) + if err != nil { + return nil, fmt.Errorf("failed to get tls config: %w", err) + } + + opts = append(opts, actions.WithRootCAs(pool)) + } + + return opts, nil } // SetupWithManager sets up the controller with the Manager. 
diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index e2ad842b11..d51698ab09 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -2,10 +2,13 @@ package actionsgithubcom import ( "context" + "crypto/tls" "encoding/base64" "fmt" "net/http" "net/http/httptest" + "os" + "path/filepath" "strings" "time" @@ -24,6 +27,7 @@ import ( v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions/fake" + "github.com/actions/actions-runner-controller/github/actions/testserver" ) const ( @@ -834,3 +838,148 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( ).Should(BeEquivalentTo(true)) }) }) + +var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func() { + var ctx context.Context + var mgr ctrl.Manager + var autoscalingNS *corev1.Namespace + var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet + var configSecret *corev1.Secret + var rootCAConfigMap *corev1.ConfigMap + + BeforeEach(func() { + ctx = context.Background() + autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) + configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) + + cert, err := os.ReadFile(filepath.Join( + "../../", + "github", + "actions", + "testdata", + "rootCA.crt", + )) + Expect(err).NotTo(HaveOccurred(), "failed to read root CA cert") + rootCAConfigMap = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "root-ca-configmap", + Namespace: autoscalingNS.Name, + }, + Data: map[string]string{ + "rootCA.crt": string(cert), + }, + } + err = k8sClient.Create(ctx, rootCAConfigMap) + Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs") 
+ + controller := &EphemeralRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ActionsClient: actions.NewMultiClient("test", logr.Discard()), + } + err = controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + startManagers(GinkgoT(), mgr) + }) + + It("should be able to make requests to a server using root CAs", func() { + certsFolder := filepath.Join( + "../../", + "github", + "actions", + "testdata", + ) + certPath := filepath.Join(certsFolder, "server.crt") + keyPath := filepath.Join(certsFolder, "server.key") + + serverSuccessfullyCalled := false + server := testserver.NewUnstarted(GinkgoT(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverSuccessfullyCalled = true + w.WriteHeader(http.StatusOK) + })) + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + Expect(err).NotTo(HaveOccurred(), "failed to load server cert") + + server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} + server.StartTLS() + + ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + }, + Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ + Replicas: 1, + EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ + GitHubConfigUrl: server.ConfigURLForOrg("my-org"), + GitHubConfigSecret: configSecret.Name, + GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: rootCAConfigMap.Name, + }, + Key: "rootCA.crt", + }, + }, + }, + RunnerScaleSetId: 100, + PodTemplateSpec: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + }, + } + + err = k8sClient.Create(ctx, ephemeralRunnerSet) + Expect(err).NotTo(HaveOccurred(), 
"failed to create EphemeralRunnerSet") + + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + Eventually(func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(1), "failed to create ephemeral runner") + + runner := runnerList.Items[0].DeepCopy() + Expect(runner.Spec.GitHubServerTLS).NotTo(BeNil(), "runner tls config should not be nil") + Expect(runner.Spec.GitHubServerTLS).To(BeEquivalentTo(ephemeralRunnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS), "runner tls config should be correct") + + runner.Status.Phase = corev1.PodRunning + runner.Status.RunnerId = 100 + err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0])) + Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status") + + updatedRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, updatedRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updatedRunnerSet.Spec.Replicas = 0 + err = k8sClient.Update(ctx, updatedRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + // wait for server to be called + Eventually( + func() bool { + return serverSuccessfullyCalled + }, + autoscalingRunnerSetTestTimeout, + 1*time.Nanosecond, + ).Should(BeTrue(), "server was not called") + }) +}) diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index b8f7512d6d..dd555289f5 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -307,6 +307,7 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1. 
Image: image, ImagePullSecrets: imagePullSecrets, Proxy: autoscalingRunnerSet.Spec.Proxy, + GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS, }, } diff --git a/controllers/actions.github.com/suite_test.go b/controllers/actions.github.com/suite_test.go index c0a12a37dc..80fb4196f5 100644 --- a/controllers/actions.github.com/suite_test.go +++ b/controllers/actions.github.com/suite_test.go @@ -39,9 +39,11 @@ import ( // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. -var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment +var ( + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment +) func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) diff --git a/github/actions/actions_server_test.go b/github/actions/actions_server_test.go index 2638435ace..e2580bd4e1 100644 --- a/github/actions/actions_server_test.go +++ b/github/actions/actions_server_test.go @@ -58,12 +58,6 @@ func newActionsServer(t *testing.T, handler http.Handler, options ...actionsServ type actionsServerOption func(*actionsServer) -func withActionsToken(token string) actionsServerOption { - return func(s *actionsServer) { - s.token = token - } -} - type actionsServer struct { *httptest.Server diff --git a/github/actions/client.go b/github/actions/client.go index 4574b3546d..7d68bd391a 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -198,6 +198,12 @@ func (c *Client) Identifier() string { ) } + if c.rootCAs != nil { + // ignoring because this cert pool is intended not to come from SystemCertPool + // nolint:staticcheck + identifier += fmt.Sprintf("rootCAs:%q", c.rootCAs.Subjects()) + } + return uuid.NewHash(sha256.New(), uuid.NameSpaceOID, []byte(identifier), 6).String() } diff --git a/github/actions/client_tls_test.go b/github/actions/client_tls_test.go index 5e7190b57c..297339c0dd 100644 --- a/github/actions/client_tls_test.go +++ 
b/github/actions/client_tls_test.go @@ -95,8 +95,8 @@ func TestServerWithSelfSignedCertificates(t *testing.T) { cert, err := os.ReadFile(filepath.Join("testdata", "rootCA.crt")) require.NoError(t, err) - pool, err := actions.RootCAsFromConfigMap(map[string][]byte{"cert": cert}) - require.NoError(t, err) + pool := x509.NewCertPool() + require.True(t, pool.AppendCertsFromPEM(cert)) client, err := actions.NewClient(configURL, auth, actions.WithRootCAs(pool)) require.NoError(t, err) @@ -123,8 +123,8 @@ func TestServerWithSelfSignedCertificates(t *testing.T) { cert, err := os.ReadFile(filepath.Join("testdata", "intermediate.pem")) require.NoError(t, err) - pool, err := actions.RootCAsFromConfigMap(map[string][]byte{"cert": cert}) - require.NoError(t, err) + pool := x509.NewCertPool() + require.True(t, pool.AppendCertsFromPEM(cert)) client, err := actions.NewClient(configURL, auth, actions.WithRootCAs(pool), actions.WithRetryMax(0)) require.NoError(t, err) diff --git a/github/actions/github_api_request_test.go b/github/actions/github_api_request_test.go index 3a378149ef..fef7b58f41 100644 --- a/github/actions/github_api_request_test.go +++ b/github/actions/github_api_request_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/actions/actions-runner-controller/github/actions" + "github.com/actions/actions-runner-controller/github/actions/testserver" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -95,9 +96,9 @@ func TestNewActionsServiceRequest(t *testing.T) { t.Run("manages authentication", func(t *testing.T) { t.Run("client is brand new", func(t *testing.T) { token := defaultActionsToken(t) - server := newActionsServer(t, nil, withActionsToken(token)) + server := testserver.New(t, nil, testserver.WithActionsToken(token)) - client, err := actions.NewClient(server.configURLForOrg("my-org"), defaultCreds) + client, err := actions.NewClient(server.ConfigURLForOrg("my-org"), defaultCreds) require.NoError(t, err) req, err := 
client.NewActionsServiceRequest(ctx, http.MethodGet, "my-path", nil) @@ -108,9 +109,9 @@ func TestNewActionsServiceRequest(t *testing.T) { t.Run("admin token is about to expire", func(t *testing.T) { newToken := defaultActionsToken(t) - server := newActionsServer(t, nil, withActionsToken(newToken)) + server := testserver.New(t, nil, testserver.WithActionsToken(newToken)) - client, err := actions.NewClient(server.configURLForOrg("my-org"), defaultCreds) + client, err := actions.NewClient(server.ConfigURLForOrg("my-org"), defaultCreds) require.NoError(t, err) client.ActionsServiceAdminToken = "expiring-token" client.ActionsServiceAdminTokenExpiresAt = time.Now().Add(59 * time.Second) @@ -123,9 +124,9 @@ func TestNewActionsServiceRequest(t *testing.T) { t.Run("token is currently valid", func(t *testing.T) { tokenThatShouldNotBeFetched := defaultActionsToken(t) - server := newActionsServer(t, nil, withActionsToken(tokenThatShouldNotBeFetched)) + server := testserver.New(t, nil, testserver.WithActionsToken(tokenThatShouldNotBeFetched)) - client, err := actions.NewClient(server.configURLForOrg("my-org"), defaultCreds) + client, err := actions.NewClient(server.ConfigURLForOrg("my-org"), defaultCreds) require.NoError(t, err) client.ActionsServiceAdminToken = "healthy-token" client.ActionsServiceAdminTokenExpiresAt = time.Now().Add(1 * time.Hour) @@ -138,9 +139,9 @@ func TestNewActionsServiceRequest(t *testing.T) { }) t.Run("builds the right URL including api version", func(t *testing.T) { - server := newActionsServer(t, nil) + server := testserver.New(t, nil) - client, err := actions.NewClient(server.configURLForOrg("my-org"), defaultCreds) + client, err := actions.NewClient(server.ConfigURLForOrg("my-org"), defaultCreds) require.NoError(t, err) req, err := client.NewActionsServiceRequest(ctx, http.MethodGet, "/my/path?name=banana", nil) @@ -157,9 +158,9 @@ func TestNewActionsServiceRequest(t *testing.T) { }) t.Run("populates header", func(t *testing.T) { - server := 
newActionsServer(t, nil) + server := testserver.New(t, nil) - client, err := actions.NewClient(server.configURLForOrg("my-org"), defaultCreds, actions.WithUserAgent("my-agent")) + client, err := actions.NewClient(server.ConfigURLForOrg("my-org"), defaultCreds, actions.WithUserAgent("my-agent")) require.NoError(t, err) req, err := client.NewActionsServiceRequest(ctx, http.MethodGet, "/my/path", nil) diff --git a/github/actions/identifier_test.go b/github/actions/identifier_test.go index 0a184f86cd..60c08f3b8d 100644 --- a/github/actions/identifier_test.go +++ b/github/actions/identifier_test.go @@ -1,6 +1,9 @@ package actions_test import ( + "crypto/x509" + "os" + "path/filepath" "testing" "github.com/actions/actions-runner-controller/github/actions" @@ -108,4 +111,48 @@ func TestClient_Identifier(t *testing.T) { }) } }) + + t.Run("changes in TLS config", func(t *testing.T) { + configURL := "https://github.com/org/repo" + defaultCreds := &actions.ActionsAuth{ + Token: "token", + } + + noTlS, err := actions.NewClient(configURL, defaultCreds) + require.NoError(t, err) + + poolFromCert := func(t *testing.T, path string) *x509.CertPool { + t.Helper() + f, err := os.ReadFile(path) + require.NoError(t, err) + pool := x509.NewCertPool() + require.True(t, pool.AppendCertsFromPEM(f)) + return pool + } + + root, err := actions.NewClient( + configURL, + defaultCreds, + actions.WithRootCAs(poolFromCert(t, filepath.Join("testdata", "rootCA.crt"))), + ) + require.NoError(t, err) + + chain, err := actions.NewClient( + configURL, + defaultCreds, + actions.WithRootCAs(poolFromCert(t, filepath.Join("testdata", "intermediate.pem"))), + ) + require.NoError(t, err) + + clients := []*actions.Client{ + noTlS, + root, + chain, + } + identifiers := map[string]struct{}{} + for _, client := range clients { + identifiers[client.Identifier()] = struct{}{} + } + assert.Len(t, identifiers, len(clients), "all clients should have a unique identifier") + }) } diff --git 
a/github/actions/multi_client.go b/github/actions/multi_client.go index 3731687036..eff3fd65c5 100644 --- a/github/actions/multi_client.go +++ b/github/actions/multi_client.go @@ -2,7 +2,6 @@ package actions import ( "context" - "crypto/x509" "fmt" "strconv" "sync" @@ -84,7 +83,7 @@ func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, } cachedClient, has := m.clients[key] - if has { + if has && cachedClient.rootCAs.Equal(client.rootCAs) { m.logger.Info("using cache client", "githubConfigURL", githubConfigURL, "namespace", namespace) return cachedClient, nil } @@ -141,19 +140,3 @@ func (m *multiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, auth.AppCreds = &GitHubAppAuth{AppID: parsedAppID, AppInstallationID: parsedAppInstallationID, AppPrivateKey: appPrivateKey} return m.GetClientFor(ctx, githubConfigURL, auth, namespace, options...) } - -func RootCAsFromConfigMap(configMapData map[string][]byte) (*x509.CertPool, error) { - caCertPool, err := x509.SystemCertPool() - if err != nil { - caCertPool = x509.NewCertPool() - } - - for key, certData := range configMapData { - ok := caCertPool.AppendCertsFromPEM(certData) - if !ok { - return nil, fmt.Errorf("no certificates successfully parsed from key %s", key) - } - } - - return caCertPool, nil -} From de681714283ea579ab2b772725b78fb006f1bce5 Mon Sep 17 00:00:00 2001 From: Ava Stancu Date: Fri, 10 Mar 2023 12:16:07 +0100 Subject: [PATCH 121/561] Build local image and load to kind cluster (#2378) --- .github/actions/e2e-arc-test/action.yaml | 8 +++++++- .github/workflows/e2e-test-linux-vm.yaml | 21 ++++++++++++++++++++- .github/workflows/update-runners.yaml | 1 + 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/.github/actions/e2e-arc-test/action.yaml b/.github/actions/e2e-arc-test/action.yaml index 13f3586161..6573910202 100644 --- a/.github/actions/e2e-arc-test/action.yaml +++ b/.github/actions/e2e-arc-test/action.yaml @@ -8,12 +8,18 @@ inputs: config-url: 
description: "URL of the repo, org or enterprise where the runner scale sets will be registered" required: true + docker-image-repo: + description: "Local docker image repo for testing" + required: true + docker-image-tag: + description: "Tag of ARC Docker image for testing" + required: true runs: using: "composite" steps: - name: Install ARC - run: helm install arc --namespace "arc-systems" --create-namespace ./charts/gha-runner-scale-set-controller + run: helm install arc --namespace "arc-systems" --create-namespace --set image.tag=${{ inputs.docker-image-tag }} --set image.repository=${{ inputs.docker-image-repo }} ./charts/gha-runner-scale-set-controller shell: bash - name: Get datetime # We are using this value further in the runner installation to avoid runner name collision that are a risk with hard coded values. diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 85c282d38b..78732bc720 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -9,16 +9,33 @@ on: env: TARGET_ORG: actions-runner-controller + CLUSTER_NAME: e2e-test + RUNNER_VERSION: 2.302.1 + IMAGE_REPO: "test/test-image" jobs: setup-steps: runs-on: [ubuntu-latest] steps: - uses: actions/checkout@v3 + - name: Add env variables + run: | + TAG=$(echo "0.0.$GITHUB_SHA") + echo "TAG=$TAG" >> $GITHUB_ENV + echo "IMAGE=$(echo "$IMAGE_REPO:$TAG)" >> $GITHUB_ENV + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + with: + version: latest + - name: Docker Build Test Image + run: | + DOCKER_CLI_EXPERIMENTAL=enabled DOCKER_BUILDKIT=1 docker buildx build --build-arg RUNNER_VERSION=$RUNNER_VERSION --build-arg TAG=$TAG -t $IMAGE . 
--load - name: Create Kind cluster run: | PATH=$(go env GOPATH)/bin:$PATH - kind create cluster --name e2e-test + kind create cluster --name $CLUSTER_NAME + - name: Load Image to Kind Cluster + run: kind load docker-image $IMAGE --name $CLUSTER_NAME - name: Get Token id: get_workflow_token uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db @@ -30,3 +47,5 @@ jobs: with: github-token: ${{ steps.get_workflow_token.outputs.token }} config-url: "https://github.com/actions-runner-controller/arc_e2e_test_dummy" + docker-image-repo: $IMAGE_REPO + docker-image-tag: $IMAGE_TAG diff --git a/.github/workflows/update-runners.yaml b/.github/workflows/update-runners.yaml index d97776c160..3c447d0b1f 100644 --- a/.github/workflows/update-runners.yaml +++ b/.github/workflows/update-runners.yaml @@ -93,6 +93,7 @@ jobs: sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go + sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e_test_linux_vm.yaml - name: Commit changes run: | From 97289a2b7a8b4490ed966703698f590985494bc6 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Fri, 10 Mar 2023 06:18:21 -0500 Subject: [PATCH 122/561] Helm chart react changes for the new runner image. 
(#2348) --- adrs/2022-10-17-runner-image.md | 13 +++ .../templates/_helpers.tpl | 28 ++--- .../templates/autoscalingrunnerset.yaml | 7 +- .../tests/template_test.go | 108 +++++++++++++++++- .../tests/values_dind_extra_volumes.yaml | 19 +++ .../tests/values_extra_volumes.yaml | 17 +++ .../tests/values_k8s_extra_volumes.yaml | 19 +++ charts/gha-runner-scale-set/values.yaml | 16 +-- .../ephemeralrunner_controller_test.go | 2 +- 9 files changed, 195 insertions(+), 34 deletions(-) create mode 100644 charts/gha-runner-scale-set/tests/values_dind_extra_volumes.yaml create mode 100644 charts/gha-runner-scale-set/tests/values_extra_volumes.yaml create mode 100644 charts/gha-runner-scale-set/tests/values_k8s_extra_volumes.yaml diff --git a/adrs/2022-10-17-runner-image.md b/adrs/2022-10-17-runner-image.md index 17b0b7e1c7..f9d2a88e64 100644 --- a/adrs/2022-10-17-runner-image.md +++ b/adrs/2022-10-17-runner-image.md @@ -3,6 +3,19 @@ **Status**: Done +# Breaking Changes + +We aim to provide an similar experience (as close as possible) between self-hosted and GitHub-hosted runners. To achieve this, we are making the following changes to align our self-hosted runner container image with the Ubuntu runners managed by GitHub. +Here are the changes: +- We created a USER `runner(1001)` and a GROUP `docker(123)` +- `sudo` has been on the image and the `runner` will be a passwordless sudoer. 
+- The runner binary was placed placed under `/home/runner/` and launched using `/home/runner/run.sh` +- The runner's work directory is `/home/runner/_work` +- `$HOME` will point to `/home/runner` +- The container image user will be the `runner(1001)` + +The latest Dockerfile can be found at: https://github.com/actions/runner/blob/main/images/Dockerfile + # Context user can bring their own runner images, the contract we have are: diff --git a/charts/gha-runner-scale-set/templates/_helpers.tpl b/charts/gha-runner-scale-set/templates/_helpers.tpl index 433d84ebaf..8511de075b 100644 --- a/charts/gha-runner-scale-set/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set/templates/_helpers.tpl @@ -83,10 +83,10 @@ imagePullSecrets: {{ $val.imagePullSecrets | toYaml -}} {{- end }} command: ["cp"] -args: ["-r", "-v", "/actions-runner/externals/.", "/actions-runner/tmpDir/"] +args: ["-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"] volumeMounts: - name: dind-externals - mountPath: /actions-runner/tmpDir + mountPath: /home/runner/tmpDir {{- end }} {{- end }} {{- end }} @@ -97,11 +97,11 @@ securityContext: privileged: true volumeMounts: - name: work - mountPath: /actions-runner/_work + mountPath: /home/runner/_work - name: dind-cert mountPath: /certs/client - name: dind-externals - mountPath: /actions-runner/externals + mountPath: /home/runner/externals {{- end }} {{- define "gha-runner-scale-set.dind-volume" -}} @@ -125,12 +125,7 @@ volumeMounts: {{- range $i, $volume := .Values.template.spec.volumes }} {{- if eq $volume.name "work" }} {{- $createWorkVolume = 0 -}} -- name: work - {{- range $key, $val := $volume }} - {{- if ne $key "name" }} - {{ $key }}: {{ $val }} - {{- end }} - {{- end }} +- {{ $volume | toYaml | nindent 2 }} {{- end }} {{- end }} {{- if eq $createWorkVolume 1 }} @@ -144,12 +139,7 @@ volumeMounts: {{- range $i, $volume := .Values.template.spec.volumes }} {{- if eq $volume.name "work" }} {{- $createWorkVolume = 0 -}} -- name: work - {{- 
range $key, $val := $volume }} - {{- if ne $key "name" }} - {{ $key }}: {{ $val }} - {{- end }} - {{- end }} +- {{ $volume | toYaml | nindent 2 }} {{- end }} {{- end }} {{- if eq $createWorkVolume 1 }} @@ -282,7 +272,7 @@ volumeMounts: {{- end }} {{- if $mountWork }} - name: work - mountPath: /actions-runner/_work + mountPath: /home/runner/_work {{- end }} {{- if $mountDindCert }} - name: dind-cert @@ -344,7 +334,7 @@ env: {{- end }} {{- if $setContainerHooks }} - name: ACTIONS_RUNNER_CONTAINER_HOOKS - value: /actions-runner/k8s/index.js + value: /home/runner/k8s/index.js {{- end }} {{- if $setPodName }} - name: ACTIONS_RUNNER_POD_NAME @@ -388,7 +378,7 @@ volumeMounts: {{- end }} {{- if $mountWork }} - name: work - mountPath: /actions-runner/_work + mountPath: /home/runner/_work {{- end }} {{- if $mountGitHubServerTLS }} - name: github-server-tls-cert diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index 6ec90340dd..e974be48cf 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -124,8 +124,13 @@ spec: {{- if eq .Values.containerMode.type "dind" }} {{- include "gha-runner-scale-set.dind-volume" . | nindent 6 }} {{- include "gha-runner-scale-set.dind-work-volume" . | nindent 6 }} + {{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }} {{- else if eq .Values.containerMode.type "kubernetes" }} {{- include "gha-runner-scale-set.kubernetes-mode-work-volume" . | nindent 6 }} + {{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }} + {{- else }} + {{- with .Values.template.spec.volumes }} + {{- toYaml . | nindent 6 }} + {{- end }} {{- end }} - {{- include "gha-runner-scale-set.non-work-volumes" . 
| nindent 6 }} {{- end }} diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index 3493deb6ba..864e63e8b4 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -591,6 +591,98 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunners_FromValuesFile(t *te assert.Equal(t, 10, *ars.Spec.MaxRunners, "MaxRunners should be 10") } +func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + testValuesPath, err := filepath.Abs("../tests/values_extra_volumes.yaml") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + ValuesFiles: []string{testValuesPath}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Len(t, ars.Spec.Template.Spec.Volumes, 3, "Volumes should be 3") + assert.Equal(t, "foo", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be foo") + assert.Equal(t, "bar", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be bar") + assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[2].Name, "Volume name should be work") + assert.Equal(t, "/data", ars.Spec.Template.Spec.Volumes[2].HostPath.Path, "Volume host path should be /data") +} + +func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + testValuesPath, err := 
filepath.Abs("../tests/values_dind_extra_volumes.yaml") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + ValuesFiles: []string{testValuesPath}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Len(t, ars.Spec.Template.Spec.Volumes, 5, "Volumes should be 5") + assert.Equal(t, "dind-cert", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-cert") + assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be dind-externals") + assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[2].Name, "Volume name should be work") + assert.Equal(t, "/data", ars.Spec.Template.Spec.Volumes[2].HostPath.Path, "Volume host path should be /data") + assert.Equal(t, "foo", ars.Spec.Template.Spec.Volumes[3].Name, "Volume name should be foo") + assert.Equal(t, "bar", ars.Spec.Template.Spec.Volumes[4].Name, "Volume name should be bar") +} + +func TestTemplateRenderedAutoScalingRunnerSet_K8S_ExtraVolumes(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + testValuesPath, err := filepath.Abs("../tests/values_k8s_extra_volumes.yaml") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + ValuesFiles: []string{testValuesPath}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, 
&ars) + + assert.Len(t, ars.Spec.Template.Spec.Volumes, 3, "Volumes should be 3") + assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be work") + assert.Equal(t, "/data", ars.Spec.Template.Spec.Volumes[0].HostPath.Path, "Volume host path should be /data") + assert.Equal(t, "foo", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be foo") + assert.Equal(t, "bar", ars.Spec.Template.Spec.Volumes[2].Name, "Volume name should be bar") +} + func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { t.Parallel() @@ -636,7 +728,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { assert.Equal(t, "init-dind-externals", ars.Spec.Template.Spec.InitContainers[0].Name) assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.InitContainers[0].Image) assert.Equal(t, "cp", ars.Spec.Template.Spec.InitContainers[0].Command[0]) - assert.Equal(t, "-r -v /actions-runner/externals/. /actions-runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " ")) + assert.Equal(t, "-r -v /home/runner/externals/. 
/home/runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " ")) assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 container") assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name) @@ -653,7 +745,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { assert.Len(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, 2, "The runner container should have 2 volume mounts, dind-cert and work") assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name) - assert.Equal(t, "/actions-runner/_work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath) + assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath) assert.False(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].ReadOnly) assert.Equal(t, "dind-cert", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name) @@ -665,13 +757,19 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { assert.True(t, *ars.Spec.Template.Spec.Containers[1].SecurityContext.Privileged) assert.Len(t, ars.Spec.Template.Spec.Containers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-cert, work and externals") assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].Name) - assert.Equal(t, "/actions-runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath) + assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath) assert.Equal(t, "dind-cert", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name) assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath) assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name) - assert.Equal(t, "/actions-runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath) + assert.Equal(t, "/home/runner/externals", 
ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath) + + assert.Len(t, ars.Spec.Template.Spec.Volumes, 3, "Volumes should be 3") + assert.Equal(t, "dind-cert", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-cert") + assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be dind-externals") + assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[2].Name, "Volume name should be work") + assert.NotNil(t, ars.Spec.Template.Spec.Volumes[2].EmptyDir, "Volume work should be an emptyDir") } func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) { @@ -719,7 +817,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name) - assert.Equal(t, "/actions-runner/k8s/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value) + assert.Equal(t, "/home/runner/k8s/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value) assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[2].Name) assert.Equal(t, "true", ars.Spec.Template.Spec.Containers[0].Env[2].Value) diff --git a/charts/gha-runner-scale-set/tests/values_dind_extra_volumes.yaml b/charts/gha-runner-scale-set/tests/values_dind_extra_volumes.yaml new file mode 100644 index 0000000000..f7d45ab056 --- /dev/null +++ b/charts/gha-runner-scale-set/tests/values_dind_extra_volumes.yaml @@ -0,0 +1,19 @@ +githubConfigUrl: https://github.com/actions/actions-runner-controller +githubConfigSecret: + github_token: test +template: + spec: + containers: + - name: other + image: other-image:latest + volumes: + - name: foo + emptyDir: {} + - name: bar + emptyDir: {} + - name: work + 
hostPath: + path: /data + type: Directory +containerMode: + type: dind \ No newline at end of file diff --git a/charts/gha-runner-scale-set/tests/values_extra_volumes.yaml b/charts/gha-runner-scale-set/tests/values_extra_volumes.yaml new file mode 100644 index 0000000000..8ac0413fef --- /dev/null +++ b/charts/gha-runner-scale-set/tests/values_extra_volumes.yaml @@ -0,0 +1,17 @@ +githubConfigUrl: https://github.com/actions/actions-runner-controller +githubConfigSecret: + github_token: test +template: + spec: + containers: + - name: other + image: other-image:latest + volumes: + - name: foo + emptyDir: {} + - name: bar + emptyDir: {} + - name: work + hostPath: + path: /data + type: Directory \ No newline at end of file diff --git a/charts/gha-runner-scale-set/tests/values_k8s_extra_volumes.yaml b/charts/gha-runner-scale-set/tests/values_k8s_extra_volumes.yaml new file mode 100644 index 0000000000..40d23883f9 --- /dev/null +++ b/charts/gha-runner-scale-set/tests/values_k8s_extra_volumes.yaml @@ -0,0 +1,19 @@ +githubConfigUrl: https://github.com/actions/actions-runner-controller +githubConfigSecret: + github_token: test +template: + spec: + containers: + - name: other + image: other-image:latest + volumes: + - name: foo + emptyDir: {} + - name: bar + emptyDir: {} + - name: work + hostPath: + path: /data + type: Directory +containerMode: + type: kubernetes \ No newline at end of file diff --git a/charts/gha-runner-scale-set/values.yaml b/charts/gha-runner-scale-set/values.yaml index 94ea1d7dcd..87677fbc5b 100644 --- a/charts/gha-runner-scale-set/values.yaml +++ b/charts/gha-runner-scale-set/values.yaml @@ -74,7 +74,7 @@ template: containers: - name: runner image: ghcr.io/actions/actions-runner:latest - command: ["/actions-runner/run.sh"] + command: ["/home/runner/run.sh"] containerMode: type: "" ## type can be set to dind or kubernetes @@ -84,10 +84,10 @@ containerMode: ## initContainers: ## - name: initExternalsInternalVolume ## image: 
ghcr.io/actions/actions-runner:latest - ## command: ["cp", "-r", "-v", "/actions-runner/externals/.", "/actions-runner/tmpDir/"] + ## command: ["cp", "-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"] ## volumeMounts: ## - name: externalsInternal - ## mountPath: /actions-runner/tmpDir + ## mountPath: /home/runner/tmpDir ## containers: ## - name: runner ## image: ghcr.io/actions/actions-runner:latest @@ -100,7 +100,7 @@ containerMode: ## value: /certs/client ## volumeMounts: ## - name: workingDirectoryInternal - ## mountPath: /actions-runner/_work + ## mountPath: /home/runner/_work ## - name: dinDInternal ## mountPath: /certs/client ## readOnly: true @@ -111,9 +111,9 @@ containerMode: ## volumeMounts: ## - mountPath: /certs/client ## name: dinDInternal - ## - mountPath: /actions-runner/_work + ## - mountPath: /home/runner/_work ## name: workingDirectoryInternal - ## - mountPath: /actions-runner/externals + ## - mountPath: /home/runner/externals ## name: externalsInternal ## volumes: ## - name: dinDInternal @@ -131,7 +131,7 @@ containerMode: ## image: ghcr.io/actions/actions-runner:latest ## env: ## - name: ACTIONS_RUNNER_CONTAINER_HOOKS - ## value: /actions-runner/k8s/index.js + ## value: /home/runner/k8s/index.js ## - name: ACTIONS_RUNNER_POD_NAME ## valueFrom: ## fieldRef: @@ -140,7 +140,7 @@ containerMode: ## value: "true" ## volumeMounts: ## - name: work - ## mountPath: /actions-runner/_work + ## mountPath: /home/runner/_work ## volumes: ## - name: work ## ephemeral: diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go index 03086099bd..bd3ca0ae0b 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go @@ -63,7 +63,7 @@ func newExampleRunner(name, namespace, configSecretName string) *v1alpha1.Epheme { Name: "setup", Image: runnerImage, - Command: []string{"sh", 
"-c", "cp -r /actions-runner/* /runner/"}, + Command: []string{"sh", "-c", "cp -r /home/runner/* /runner/"}, VolumeMounts: []corev1.VolumeMount{ { Name: "runner", From 224ebeb93794a51bc526ccd7efb00a559c86d0fc Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Fri, 10 Mar 2023 15:22:19 +0100 Subject: [PATCH 123/561] Fix test's quotes issue (#2389) Co-authored-by: Francesco Renzi --- .github/workflows/e2e-test-linux-vm.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 78732bc720..2c1933f841 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -22,7 +22,7 @@ jobs: run: | TAG=$(echo "0.0.$GITHUB_SHA") echo "TAG=$TAG" >> $GITHUB_ENV - echo "IMAGE=$(echo "$IMAGE_REPO:$TAG)" >> $GITHUB_ENV + echo "IMAGE=$IMAGE_REPO:$TAG" >> $GITHUB_ENV - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 with: From 6c5c7dfdc25912ab416149a259de99e35add9ba3 Mon Sep 17 00:00:00 2001 From: Ava Stancu Date: Fri, 10 Mar 2023 15:57:35 +0100 Subject: [PATCH 124/561] replaced inexistent variable with correct one for tag (#2390) --- .github/workflows/e2e-test-linux-vm.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 2c1933f841..08f575b718 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -48,4 +48,4 @@ jobs: github-token: ${{ steps.get_workflow_token.outputs.token }} config-url: "https://github.com/actions-runner-controller/arc_e2e_test_dummy" docker-image-repo: $IMAGE_REPO - docker-image-tag: $IMAGE_TAG + docker-image-tag: $TAG From 7338d298a54dffabaf67ecfa11cff2c522c9d88d Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Fri, 10 Mar 2023 15:28:07 +0000 Subject: [PATCH 125/561] Prepare 0.3.0 release (#2388) 
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- .../Chart.yaml | 6 ++--- charts/gha-runner-scale-set/Chart.yaml | 6 ++--- .../gha-runner-scale-set-controller/README.md | 22 ++++++++++++++----- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/charts/gha-runner-scale-set-controller/Chart.yaml b/charts/gha-runner-scale-set-controller/Chart.yaml index 7016e65041..114c431126 100644 --- a/charts/gha-runner-scale-set-controller/Chart.yaml +++ b/charts/gha-runner-scale-set-controller/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.0 +version: 0.3.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.2.0" +appVersion: "0.3.0" home: https://github.com/actions/actions-runner-controller @@ -30,4 +30,4 @@ sources: maintainers: - name: actions - url: https://github.com/actions \ No newline at end of file + url: https://github.com/actions diff --git a/charts/gha-runner-scale-set/Chart.yaml b/charts/gha-runner-scale-set/Chart.yaml index 0349000d08..df3a4a9ab6 100644 --- a/charts/gha-runner-scale-set/Chart.yaml +++ b/charts/gha-runner-scale-set/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.0 +version: 0.3.0 # This is the version number of the application being deployed. 
This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.2.0" +appVersion: "0.3.0" home: https://github.com/actions/dev-arc @@ -30,4 +30,4 @@ sources: maintainers: - name: actions - url: https://github.com/actions \ No newline at end of file + url: https://github.com/actions diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index 0e7d8d8aab..a3ce48d0b9 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -38,7 +38,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 --namespace "${NAMESPACE}" \ --create-namespace \ oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \ - --version 0.2.0 + --version 0.3.0 ``` 1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app). 
@@ -59,7 +59,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 --create-namespace \ --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ --set githubConfigSecret.github_token="${GITHUB_PAT}" \ - oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.2.0 + oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.3.0 ``` ```bash @@ -77,7 +77,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 --set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \ --set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \ --set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \ - oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.2.0 + oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.3.0 ``` 1. Check your installation. If everything went well, you should see the following: @@ -86,8 +86,8 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 $ helm list -n "${NAMESPACE}" NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION - arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed gha-runner-scale-set-controller-0.2.0 preview - arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed gha-runner-scale-set-0.2.0 0.2.0 + arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed gha-runner-scale-set-controller-0.3.0 preview + arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed gha-runner-scale-set-0.3.0 0.3.0 ``` ```bash @@ -158,6 +158,18 @@ Verify that the secret you provided is correct and that the `githubConfigUrl` yo ## Changelog +### v0.3.0 + +#### Major changes + +1. Runner pods are more similar to hosted runners [#2348](https://github.com/actions/actions-runner-controller/pull/2348) +1. 
Add support for self-signed CA certificates [#2268](https://github.com/actions/actions-runner-controller/pull/2268) +1. Fixed trailing slashes in config URLs breaking installations [#2381](https://github.com/actions/actions-runner-controller/pull/2381) +1. Fixed a bug where the listener pod would ignore proxy settings from env [#2366](https://github.com/actions/actions-runner-controller/pull/2366) +1. Added runner set name field making it optionally configurable [#2279](https://github.com/actions/actions-runner-controller/pull/2279) +1. Name and namespace labels of listener pod have been split [#2341](https://github.com/actions/actions-runner-controller/pull/2341) +1. Added chart name constraints validation on AutoscalingRunnerSet install [#2347](https://github.com/actions/actions-runner-controller/pull/2347) + ### v0.2.0 #### Major changes From 0596981305b1618b35e8ba14a47ba79aaed53808 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Fri, 10 Mar 2023 18:05:51 +0100 Subject: [PATCH 126/561] Refactor main.go and introduce make run-scaleset to be able to run manager locally (#2337) --- Makefile | 9 +- .../templates/deployment.yaml | 8 +- .../tests/template_test.go | 16 +- config/manager/env-replacement.yaml | 10 + config/manager/kustomization.yaml | 3 + config/manager/manager.yaml | 6 +- main.go | 173 +++++++----------- 7 files changed, 106 insertions(+), 119 deletions(-) create mode 100644 config/manager/env-replacement.yaml diff --git a/Makefile b/Makefile index e42d2ae79f..19cef42e9d 100644 --- a/Makefile +++ b/Makefile @@ -92,9 +92,14 @@ manager: generate fmt vet run: generate fmt vet manifests go run ./main.go +run-scaleset: generate fmt vet + CONTROLLER_MANAGER_POD_NAMESPACE=default \ + CONTROLLER_MANAGER_CONTAINER_IMAGE="${DOCKER_IMAGE_NAME}:${VERSION}" \ + go run ./main.go --auto-scaling-runner-set-only + # Install CRDs into a cluster install: manifests - kustomize build config/crd | kubectl apply -f - + kustomize build config/crd | kubectl apply 
--server-side -f - # Uninstall CRDs from a cluster uninstall: manifests @@ -103,7 +108,7 @@ uninstall: manifests # Deploy controller in the configured Kubernetes cluster in ~/.kube/config deploy: manifests cd config/manager && kustomize edit set image controller=${DOCKER_IMAGE_NAME}:${VERSION} - kustomize build config/default | kubectl apply -f - + kustomize build config/default | kubectl apply --server-side -f - # Generate manifests e.g. CRD, RBAC etc. manifests: manifests-gen-crds chart-crds diff --git a/charts/gha-runner-scale-set-controller/templates/deployment.yaml b/charts/gha-runner-scale-set-controller/templates/deployment.yaml index a35dc784f2..cae43f4e4c 100644 --- a/charts/gha-runner-scale-set-controller/templates/deployment.yaml +++ b/charts/gha-runner-scale-set-controller/templates/deployment.yaml @@ -54,10 +54,8 @@ spec: command: - "/manager" env: - - name: CONTROLLER_MANAGER_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name + - name: CONTROLLER_MANAGER_CONTAINER_IMAGE + value: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - name: CONTROLLER_MANAGER_POD_NAMESPACE valueFrom: fieldRef: @@ -98,4 +96,4 @@ spec: {{- with .Values.tolerations }} tolerations: {{- toYaml . 
| nindent 8 }} - {{- end }} \ No newline at end of file + {{- end }} diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index 00ab04b5a4..fe4bf020bf 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -261,9 +261,11 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Nil(t, deployment.Spec.Template.Spec.Affinity) assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0) + managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev" + assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name) - assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) @@ -274,8 +276,8 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2) - assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) - assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath) + assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) + assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value) assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "metadata.namespace", 
deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) @@ -375,9 +377,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1) assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key) + managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev" + assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name) - assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, corev1.PullAlways, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) @@ -389,8 +393,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2) - assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) - assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath) + assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) + assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value) assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) diff --git a/config/manager/env-replacement.yaml b/config/manager/env-replacement.yaml new file mode 100644 index 0000000000..7caef2cf11 --- /dev/null +++ b/config/manager/env-replacement.yaml @@ -0,0 +1,10 @@ +source: + 
kind: Deployment + name: controller-manager + fieldPath: spec.template.spec.containers.[name=manager].image +targets: +- select: + kind: Deployment + name: controller-manager + fieldPaths: + - spec.template.spec.containers.[name=manager].env.[name=CONTROLLER_MANAGER_CONTAINER_IMAGE].value diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index e7063a8d0a..6a60f8b38a 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -6,3 +6,6 @@ images: - name: controller newName: summerwind/actions-runner-controller newTag: dev + +replacements: +- path: env-replacement.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index d16d16987a..f90df347f2 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -50,10 +50,8 @@ spec: optional: true - name: GITHUB_APP_PRIVATE_KEY value: /etc/actions-runner-controller/github_app_private_key - - name: CONTROLLER_MANAGER_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name + - name: CONTROLLER_MANAGER_CONTAINER_IMAGE + value: CONTROLLER_MANAGER_CONTAINER_IMAGE - name: CONTROLLER_MANAGER_POD_NAMESPACE valueFrom: fieldRef: diff --git a/main.go b/main.go index 6cdd4ef744..fbf8b3ff43 100644 --- a/main.go +++ b/main.go @@ -17,7 +17,6 @@ limitations under the License. 
package main import ( - "context" "flag" "fmt" "os" @@ -33,9 +32,7 @@ import ( "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/logging" "github.com/kelseyhightower/envconfig" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" @@ -47,9 +44,7 @@ const ( defaultDockerImage = "docker:dind" ) -var ( - scheme = runtime.NewScheme() -) +var scheme = runtime.NewScheme() func init() { _ = clientgoscheme.AddToScheme(scheme) @@ -68,6 +63,7 @@ func (i *stringSlice) Set(value string) error { *i = append(*i, value) return nil } + func main() { var ( err error @@ -170,17 +166,69 @@ func main() { os.Exit(1) } - multiClient := actionssummerwindnet.NewMultiGitHubClient( - mgr.GetClient(), - ghClient, - ) + if autoScalingRunnerSetOnly { + managerImage := os.Getenv("CONTROLLER_MANAGER_CONTAINER_IMAGE") + if managerImage == "" { + log.Error(err, "unable to obtain listener image") + os.Exit(1) + } + managerNamespace := os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE") + if managerNamespace == "" { + log.Error(err, "unable to obtain manager pod namespace") + os.Exit(1) + } - actionsMultiClient := actions.NewMultiClient( - "actions-runner-controller/"+build.Version, - log.WithName("actions-clients"), - ) + actionsMultiClient := actions.NewMultiClient( + "actions-runner-controller/"+build.Version, + log.WithName("actions-clients"), + ) + + if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("AutoscalingRunnerSet"), + Scheme: mgr.GetScheme(), + ControllerNamespace: managerNamespace, + DefaultRunnerScaleSetListenerImage: managerImage, + ActionsClient: actionsMultiClient, + DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets, + }).SetupWithManager(mgr); err != nil { + 
log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet") + os.Exit(1) + } + + if err = (&actionsgithubcom.EphemeralRunnerReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("EphemeralRunner"), + Scheme: mgr.GetScheme(), + ActionsClient: actionsMultiClient, + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "EphemeralRunner") + os.Exit(1) + } + + if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("EphemeralRunnerSet"), + Scheme: mgr.GetScheme(), + ActionsClient: actionsMultiClient, + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet") + os.Exit(1) + } + if err = (&actionsgithubcom.AutoscalingListenerReconciler{ + Client: mgr.GetClient(), + Log: log.WithName("AutoscalingListener"), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "AutoscalingListener") + os.Exit(1) + } + } else { + multiClient := actionssummerwindnet.NewMultiGitHubClient( + mgr.GetClient(), + ghClient, + ) - if !autoScalingRunnerSetOnly { runnerReconciler := &actionssummerwindnet.RunnerReconciler{ Client: mgr.GetClient(), Log: log.WithName("runner"), @@ -314,94 +362,15 @@ func main() { log.Error(err, "unable to create webhook", "webhook", "RunnerReplicaSet") os.Exit(1) } - } - } - - // We use this environment avariable to turn on the ScaleSet related controllers. - // Otherwise ARC's legacy chart is unable to deploy a working ARC controller-manager pod, - // due to that the chart does not contain new actions.* CRDs while ARC requires those CRDs. - // - // We might have used a more explicitly named environment variable for this, - // e.g. "CONTROLLER_MANAGER_ENABLE_SCALE_SET" to explicitly enable the new controllers, - // or "CONTROLLER_MANAGER_DISABLE_SCALE_SET" to explicitly disable the new controllers. 
- // However, doing so would affect either private ARC testers or current ARC users - // who run ARC without those variabls. - mgrPodName := os.Getenv("CONTROLLER_MANAGER_POD_NAME") - if mgrPodName != "" { - mgrPodNamespace := os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE") - var mgrPod corev1.Pod - err = mgr.GetAPIReader().Get(context.Background(), types.NamespacedName{Namespace: mgrPodNamespace, Name: mgrPodName}, &mgrPod) - if err != nil { - log.Error(err, fmt.Sprintf("unable to obtain manager pod: %s (%s)", mgrPodName, mgrPodNamespace)) - os.Exit(1) - } - - var mgrContainer *corev1.Container - for _, container := range mgrPod.Spec.Containers { - if container.Name == "manager" { - mgrContainer = &container - break + injector := &actionssummerwindnet.PodRunnerTokenInjector{ + Client: mgr.GetClient(), + GitHubClient: multiClient, + Log: ctrl.Log.WithName("webhook").WithName("PodRunnerTokenInjector"), + } + if err = injector.SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create webhook server", "webhook", "PodRunnerTokenInjector") + os.Exit(1) } - } - - if mgrContainer != nil { - log.Info("Detected manager container", "image", mgrContainer.Image) - } else { - log.Error(err, "unable to obtain manager container image") - os.Exit(1) - } - if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("AutoscalingRunnerSet"), - Scheme: mgr.GetScheme(), - ControllerNamespace: mgrPodNamespace, - DefaultRunnerScaleSetListenerImage: mgrContainer.Image, - ActionsClient: actionsMultiClient, - DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets, - }).SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet") - os.Exit(1) - } - - if err = (&actionsgithubcom.EphemeralRunnerReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("EphemeralRunner"), - Scheme: mgr.GetScheme(), - ActionsClient: actionsMultiClient, - 
}).SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "EphemeralRunner") - os.Exit(1) - } - - if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("EphemeralRunnerSet"), - Scheme: mgr.GetScheme(), - ActionsClient: actionsMultiClient, - }).SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet") - os.Exit(1) - } - if err = (&actionsgithubcom.AutoscalingListenerReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("AutoscalingListener"), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create controller", "controller", "AutoscalingListener") - os.Exit(1) - } - // +kubebuilder:scaffold:builder - } - - if !disableAdmissionWebhook && !autoScalingRunnerSetOnly { - injector := &actionssummerwindnet.PodRunnerTokenInjector{ - Client: mgr.GetClient(), - GitHubClient: multiClient, - Log: ctrl.Log.WithName("webhook").WithName("PodRunnerTokenInjector"), - } - if err = injector.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create webhook server", "webhook", "PodRunnerTokenInjector") - os.Exit(1) } } From fb6f4d008a54cab2d9f3e015c69da5667074e30d Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Fri, 10 Mar 2023 18:14:00 +0100 Subject: [PATCH 127/561] Add upgrade steps (#2392) Co-authored-by: Nikola Jokic --- .../gha-runner-scale-set-controller/README.md | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index a3ce48d0b9..c1aa774685 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -124,6 +124,31 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 arc-runners 
arc-runner-set-rmrgw-runner-p9p5n 1/1 Running 0 21s ``` +### Upgrade to newer versions + +Upgrading actions-runner-controller requires a few extra steps because CRDs will not be automatically upgraded (this is a helm limitation). + +1. Uninstall the autoscaling runner set first + + ```bash + INSTALLATION_NAME="arc-runner-set" + NAMESPACE="arc-runners" + helm uninstall "${INSTALLATION_NAME}" --namespace "${NAMESPACE}" + ``` + +1. Wait for all the pods to drain + +1. Pull the new helm chart, unpack it and update the CRDs. When applying this step, don't forget to replace `` with the path of the `gha-runner-scale-set-controller` helm chart: + + ```bash + helm pull oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \ + --version 0.3.0 \ + --untar && \ + kubectl replace -f /gha-runner-scale-set-controller/crds/ + ``` + +1. Reinstall actions-runner-controller using the steps from the previous section + ## Troubleshooting ### Check the logs From 37713acc394ba2b0042b70b8fb1c74051ec38fa4 Mon Sep 17 00:00:00 2001 From: Milas Bowman Date: Sun, 12 Mar 2023 21:29:40 -0400 Subject: [PATCH 128/561] Upgrade to Docker Engine v20.10.23 (#2328) Co-authored-by: Yusuke Kuoka --- .github/workflows/release-runners.yaml | 2 +- runner/Makefile | 2 +- runner/actions-runner-dind.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner-dind.ubuntu-22.04.dockerfile | 2 +- runner/actions-runner.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner.ubuntu-22.04.dockerfile | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release-runners.yaml b/.github/workflows/release-runners.yaml index 5e6f1efefb..7a2334fada 100644 --- a/.github/workflows/release-runners.yaml +++ b/.github/workflows/release-runners.yaml @@ -17,7 +17,7 @@ env: PUSH_TO_REGISTRIES: true TARGET_ORG: actions-runner-controller TARGET_WORKFLOW: release-runners.yaml - DOCKER_VERSION: 20.10.21 + DOCKER_VERSION: 20.10.23 RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0 jobs: diff 
--git a/runner/Makefile b/runner/Makefile index acd5fd7091..e98a933aa6 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -8,7 +8,7 @@ TARGETPLATFORM ?= $(shell arch) RUNNER_VERSION ?= 2.302.1 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0 -DOCKER_VERSION ?= 20.10.21 +DOCKER_VERSION ?= 20.10.23 # default list of platforms for which multiarch image is built ifeq (${PLATFORMS}, ) diff --git a/runner/actions-runner-dind.ubuntu-20.04.dockerfile b/runner/actions-runner-dind.ubuntu-20.04.dockerfile index fb83c166fe..da19b4b896 100644 --- a/runner/actions-runner-dind.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-20.04.dockerfile @@ -5,7 +5,7 @@ ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable -ARG DOCKER_VERSION=20.10.18 +ARG DOCKER_VERSION=20.10.23 ARG DOCKER_COMPOSE_VERSION=v2.16.0 ARG DUMB_INIT_VERSION=1.2.5 diff --git a/runner/actions-runner-dind.ubuntu-22.04.dockerfile b/runner/actions-runner-dind.ubuntu-22.04.dockerfile index 65e0365856..3532f2a514 100644 --- a/runner/actions-runner-dind.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-22.04.dockerfile @@ -5,7 +5,7 @@ ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable -ARG DOCKER_VERSION=20.10.21 +ARG DOCKER_VERSION=20.10.23 ARG DOCKER_COMPOSE_VERSION=v2.16.0 ARG DUMB_INIT_VERSION=1.2.5 ARG RUNNER_USER_UID=1001 diff --git a/runner/actions-runner.ubuntu-20.04.dockerfile b/runner/actions-runner.ubuntu-20.04.dockerfile index 0339174fd9..5c246a0cd0 100644 --- a/runner/actions-runner.ubuntu-20.04.dockerfile +++ b/runner/actions-runner.ubuntu-20.04.dockerfile @@ -5,7 +5,7 @@ ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable -ARG DOCKER_VERSION=20.10.18 +ARG DOCKER_VERSION=20.10.23 ARG DOCKER_COMPOSE_VERSION=v2.16.0 ARG DUMB_INIT_VERSION=1.2.5 diff --git 
a/runner/actions-runner.ubuntu-22.04.dockerfile b/runner/actions-runner.ubuntu-22.04.dockerfile index 275601753d..966856fbba 100644 --- a/runner/actions-runner.ubuntu-22.04.dockerfile +++ b/runner/actions-runner.ubuntu-22.04.dockerfile @@ -5,7 +5,7 @@ ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 # Docker and Docker Compose arguments ARG CHANNEL=stable -ARG DOCKER_VERSION=20.10.21 +ARG DOCKER_VERSION=20.10.23 ARG DOCKER_COMPOSE_VERSION=v2.16.0 ARG DUMB_INIT_VERSION=1.2.5 ARG RUNNER_USER_UID=1001 From c45331e030a2f4870cccaaaca07b4b0a6f8582d0 Mon Sep 17 00:00:00 2001 From: Hamish Forbes Date: Mon, 13 Mar 2023 14:50:36 +1300 Subject: [PATCH 129/561] feat(actionsmetrics): Add owner and workflow_name labels to workflow job metrics (#2225) --- pkg/actionsmetrics/event_reader.go | 34 +++++++++++++++++++++-------- pkg/actionsmetrics/metrics.go | 19 ++++++++++------ pkg/actionsmetrics/webhookserver.go | 2 +- 3 files changed, 38 insertions(+), 17 deletions(-) diff --git a/pkg/actionsmetrics/event_reader.go b/pkg/actionsmetrics/event_reader.go index 4beb33f5d4..1874d4ae11 100644 --- a/pkg/actionsmetrics/event_reader.go +++ b/pkg/actionsmetrics/event_reader.go @@ -79,14 +79,34 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in labels["repository_full_name"] = *n keysAndValues = append(keysAndValues, "repository_full_name", *n) } + + if e.Repo.Owner != nil { + if l := e.Repo.Owner.Login; l != nil { + labels["owner"] = *l + keysAndValues = append(keysAndValues, "owner", *l) + } + } } + var org string if e.Org != nil { if n := e.Org.Name; n != nil { - labels["organization"] = *e.Org.Name + org = *n keysAndValues = append(keysAndValues, "organization", *n) } } + labels["organization"] = org + + var wn string + if e.WorkflowJob != nil { + if n := e.WorkflowJob.WorkflowName; n != nil { + wn = *n + keysAndValues = append(keysAndValues, "workflow_name", *n) + } + } + labels["workflow_name"] = wn + + log := 
reader.Log.WithValues(keysAndValues...) // switch on job status switch action := e.GetAction(); action { @@ -102,14 +122,10 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e) if err != nil { - reader.Log.Error(err, "reading workflow job log") + log.Error(err, "reading workflow job log") return } else { - reader.Log.WithValues("job_name", *e.WorkflowJob.Name, "job_id", fmt.Sprint(*e.WorkflowJob.ID), "repository", *e.Repo.Name, "repository_full_name", *e.Repo.FullName) - if len(*e.Org.Name) > 0 { - reader.Log.WithValues("organization", *e.Org.Name) - } - reader.Log.Info("reading workflow_job logs") + log.Info("reading workflow_job logs") } githubWorkflowJobQueueDurationSeconds.With(labels).Observe(parseResult.QueueTime.Seconds()) @@ -122,10 +138,10 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e) if err != nil { - reader.Log.Error(err, "reading workflow job log") + log.Error(err, "reading workflow job log") return } else { - reader.Log.Info("reading workflow_job logs", keysAndValues...) + log.Info("reading workflow_job logs", keysAndValues...) } if *e.WorkflowJob.Conclusion == "failure" { diff --git a/pkg/actionsmetrics/metrics.go b/pkg/actionsmetrics/metrics.go index 5adb7bb4a6..1c0deb1b65 100644 --- a/pkg/actionsmetrics/metrics.go +++ b/pkg/actionsmetrics/metrics.go @@ -71,14 +71,19 @@ var ( } ) +func metricLabels(extras ...string) []string { + return append(append([]string{}, commonLabels...), extras...) 
+} + var ( + commonLabels = []string{"runs_on", "job_name", "organization", "repository", "repository_full_name", "owner", "workflow_name"} githubWorkflowJobQueueDurationSeconds = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "github_workflow_job_queue_duration_seconds", Help: "Queue times for workflow jobs in seconds", Buckets: runtimeBuckets, }, - []string{"runs_on", "job_name"}, + metricLabels(), ) githubWorkflowJobRunDurationSeconds = prometheus.NewHistogramVec( prometheus.HistogramOpts{ @@ -86,41 +91,41 @@ var ( Help: "Run times for workflow jobs in seconds", Buckets: runtimeBuckets, }, - []string{"runs_on", "job_name", "job_conclusion"}, + metricLabels("job_conclusion"), ) githubWorkflowJobConclusionsTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "github_workflow_job_conclusions_total", Help: "Conclusions for tracked workflow jobs", }, - []string{"runs_on", "job_name", "job_conclusion"}, + metricLabels("job_conclusion"), ) githubWorkflowJobsQueuedTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "github_workflow_jobs_queued_total", Help: "Total count of workflow jobs queued (events where job_status=queued)", }, - []string{"runs_on", "job_name"}, + metricLabels(), ) githubWorkflowJobsStartedTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "github_workflow_jobs_started_total", Help: "Total count of workflow jobs started (events where job_status=in_progress)", }, - []string{"runs_on", "job_name"}, + metricLabels(), ) githubWorkflowJobsCompletedTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "github_workflow_jobs_completed_total", Help: "Total count of workflow jobs completed (events where job_status=completed)", }, - []string{"runs_on", "job_name"}, + metricLabels(), ) githubWorkflowJobFailuresTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "github_workflow_job_failures_total", Help: "Conclusions for tracked workflow runs", }, - []string{"runs_on", "job_name", 
"failed_step", "exit_code"}, + metricLabels("failed_step", "exit_code"), ) ) diff --git a/pkg/actionsmetrics/webhookserver.go b/pkg/actionsmetrics/webhookserver.go index 1fa306897a..7d4cb8949d 100644 --- a/pkg/actionsmetrics/webhookserver.go +++ b/pkg/actionsmetrics/webhookserver.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/go-logr/logr" - gogithub "github.com/google/go-github/v47/github" + gogithub "github.com/google/go-github/v50/github" ctrl "sigs.k8s.io/controller-runtime" "github.com/actions/actions-runner-controller/github" From a3d800c3fb449e05850242e9be059ce85f5958de Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 13 Mar 2023 11:56:21 +0100 Subject: [PATCH 130/561] Fix wrong file name in the workflow (#2394) --- .github/workflows/update-runners.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/update-runners.yaml b/.github/workflows/update-runners.yaml index 3c447d0b1f..884ddd6a68 100644 --- a/.github/workflows/update-runners.yaml +++ b/.github/workflows/update-runners.yaml @@ -93,7 +93,7 @@ jobs: sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go - sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e_test_linux_vm.yaml + sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e-test-linux-vm.yaml - name: Commit changes run: | From 622c1b18a19581ca0899853db13df3881d29aaca Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 13 Mar 2023 13:39:07 +0100 Subject: [PATCH 131/561] Delete renovate.json5 (#2397) --- .github/renovate.json5 | 43 ------------------------------------------ 1 file changed, 43 deletions(-) delete mode 100644 .github/renovate.json5 diff --git a/.github/renovate.json5 b/.github/renovate.json5 deleted file mode 
100644 index 21f4570051..0000000000 --- a/.github/renovate.json5 +++ /dev/null @@ -1,43 +0,0 @@ -{ - "extends": ["config:base"], - "labels": ["dependencies"], - "packageRules": [ - { - // automatically merge an update of runner - "matchPackageNames": ["actions/runner"], - "extractVersion": "^v(?.*)$", - "automerge": true - } - ], - "regexManagers": [ - { - // use https://github.com/actions/runner/releases - "fileMatch": [ - ".github/workflows/runners.yaml" - ], - "matchStrings": ["RUNNER_VERSION: +(?.*?)\\n"], - "depNameTemplate": "actions/runner", - "datasourceTemplate": "github-releases" - }, - { - "fileMatch": [ - "runner/Makefile", - "Makefile" - ], - "matchStrings": ["RUNNER_VERSION \\?= +(?.*?)\\n"], - "depNameTemplate": "actions/runner", - "datasourceTemplate": "github-releases" - }, - { - "fileMatch": [ - "runner/actions-runner.ubuntu-20.04.dockerfile", - "runner/actions-runner.ubuntu-22.04.dockerfile", - "runner/actions-runner-dind.ubuntu-20.04.dockerfile", - "runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile" - ], - "matchStrings": ["RUNNER_VERSION=+(?.*?)\\n"], - "depNameTemplate": "actions/runner", - "datasourceTemplate": "github-releases" - } - ] -} From 4ac1f54f617cd554084e63ca6b632e5826295ce3 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Mon, 13 Mar 2023 16:16:28 +0100 Subject: [PATCH 132/561] Surface EphemeralRunnerSet stats to AutoscalingRunnerSet (#2382) --- .../v1alpha1/autoscalingrunnerset_types.go | 23 ++- .../v1alpha1/ephemeralrunnerset_types.go | 16 +- ...ions.github.com_autoscalingrunnersets.yaml | 24 ++- ...ctions.github.com_ephemeralrunnersets.yaml | 20 ++ ...ions.github.com_autoscalingrunnersets.yaml | 24 ++- ...ctions.github.com_ephemeralrunnersets.yaml | 20 ++ .../autoscalingrunnerset_controller.go | 3 + .../autoscalingrunnerset_controller_test.go | 85 ++++++-- .../ephemeralrunnerset_controller.go | 11 +- .../ephemeralrunnerset_controller_test.go | 182 +++++++++++++++++- 10 files changed, 373 insertions(+), 35 deletions(-) 
diff --git a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go index adc9a94e0e..35003e592a 100644 --- a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go +++ b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go @@ -33,10 +33,14 @@ import ( //+kubebuilder:object:root=true //+kubebuilder:subresource:status -//+kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=number -//+kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=number -//+kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=number +//+kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=integer +//+kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=integer +//+kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=integer //+kubebuilder:printcolumn:JSONPath=".status.state",name=State,type=string +//+kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer +//+kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer +//+kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer +//+kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer // AutoscalingRunnerSet is the Schema for the autoscalingrunnersets API type AutoscalingRunnerSet struct { @@ -228,10 +232,19 @@ type ProxyServerConfig struct { // AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet type AutoscalingRunnerSetStatus struct { // +optional - CurrentRunners int `json:"currentRunners,omitempty"` + CurrentRunners int `json:"currentRunners"` // +optional - State string `json:"state,omitempty"` + State string `json:"state"` + + // EphemeralRunner counts separated by the stage ephemeral 
runners are in, taken from the EphemeralRunnerSet + + //+optional + PendingEphemeralRunners int `json:"pendingEphemeralRunners"` + // +optional + RunningEphemeralRunners int `json:"runningEphemeralRunners"` + // +optional + FailedEphemeralRunners int `json:"failedEphemeralRunners"` } func (ars *AutoscalingRunnerSet) ListenerSpecHash() string { diff --git a/apis/actions.github.com/v1alpha1/ephemeralrunnerset_types.go b/apis/actions.github.com/v1alpha1/ephemeralrunnerset_types.go index 167296d640..88524f2a41 100644 --- a/apis/actions.github.com/v1alpha1/ephemeralrunnerset_types.go +++ b/apis/actions.github.com/v1alpha1/ephemeralrunnerset_types.go @@ -31,13 +31,27 @@ type EphemeralRunnerSetSpec struct { // EphemeralRunnerSetStatus defines the observed state of EphemeralRunnerSet type EphemeralRunnerSetStatus struct { // CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet. - CurrentReplicas int `json:"currentReplicas,omitempty"` + CurrentReplicas int `json:"currentReplicas"` + + // EphemeralRunner counts separated by the stage ephemeral runners are in + + // +optional + PendingEphemeralRunners int `json:"pendingEphemeralRunners"` + // +optional + RunningEphemeralRunners int `json:"runningEphemeralRunners"` + // +optional + FailedEphemeralRunners int `json:"failedEphemeralRunners"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="DesiredReplicas",type="integer" // +kubebuilder:printcolumn:JSONPath=".status.currentReplicas", name="CurrentReplicas",type="integer" +//+kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer +//+kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer +//+kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer 
+//+kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer + // EphemeralRunnerSet is the Schema for the ephemeralrunnersets API type EphemeralRunnerSet struct { metav1.TypeMeta `json:",inline"` diff --git a/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml index 6c4c82cbf3..992926cdad 100644 --- a/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml +++ b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalingrunnersets.yaml @@ -17,16 +17,28 @@ spec: - additionalPrinterColumns: - jsonPath: .spec.minRunners name: Minimum Runners - type: number + type: integer - jsonPath: .spec.maxRunners name: Maximum Runners - type: number + type: integer - jsonPath: .status.currentRunners name: Current Runners - type: number + type: integer - jsonPath: .status.state name: State type: string + - jsonPath: .status.pendingEphemeralRunners + name: Pending Runners + type: integer + - jsonPath: .status.runningEphemeralRunners + name: Running Runners + type: integer + - jsonPath: .status.finishedEphemeralRunners + name: Finished Runners + type: integer + - jsonPath: .status.deletingEphemeralRunners + name: Deleting Runners + type: integer name: v1alpha1 schema: openAPIV3Schema: @@ -4306,6 +4318,12 @@ spec: properties: currentRunners: type: integer + failedEphemeralRunners: + type: integer + pendingEphemeralRunners: + type: integer + runningEphemeralRunners: + type: integer state: type: string type: object diff --git a/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunnersets.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunnersets.yaml index 86a3f40ce7..1e4b475198 100644 --- a/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunnersets.yaml +++ 
b/charts/gha-runner-scale-set-controller/crds/actions.github.com_ephemeralrunnersets.yaml @@ -21,6 +21,18 @@ spec: - jsonPath: .status.currentReplicas name: CurrentReplicas type: integer + - jsonPath: .status.pendingEphemeralRunners + name: Pending Runners + type: integer + - jsonPath: .status.runningEphemeralRunners + name: Running Runners + type: integer + - jsonPath: .status.finishedEphemeralRunners + name: Finished Runners + type: integer + - jsonPath: .status.deletingEphemeralRunners + name: Deleting Runners + type: integer name: v1alpha1 schema: openAPIV3Schema: @@ -4296,6 +4308,14 @@ spec: currentReplicas: description: CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet. type: integer + failedEphemeralRunners: + type: integer + pendingEphemeralRunners: + type: integer + runningEphemeralRunners: + type: integer + required: + - currentReplicas type: object type: object served: true diff --git a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml index 6c4c82cbf3..992926cdad 100644 --- a/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml +++ b/config/crd/bases/actions.github.com_autoscalingrunnersets.yaml @@ -17,16 +17,28 @@ spec: - additionalPrinterColumns: - jsonPath: .spec.minRunners name: Minimum Runners - type: number + type: integer - jsonPath: .spec.maxRunners name: Maximum Runners - type: number + type: integer - jsonPath: .status.currentRunners name: Current Runners - type: number + type: integer - jsonPath: .status.state name: State type: string + - jsonPath: .status.pendingEphemeralRunners + name: Pending Runners + type: integer + - jsonPath: .status.runningEphemeralRunners + name: Running Runners + type: integer + - jsonPath: .status.finishedEphemeralRunners + name: Finished Runners + type: integer + - jsonPath: .status.deletingEphemeralRunners + name: Deleting Runners + type: integer name: 
v1alpha1 schema: openAPIV3Schema: @@ -4306,6 +4318,12 @@ spec: properties: currentRunners: type: integer + failedEphemeralRunners: + type: integer + pendingEphemeralRunners: + type: integer + runningEphemeralRunners: + type: integer state: type: string type: object diff --git a/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml b/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml index 86a3f40ce7..1e4b475198 100644 --- a/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml +++ b/config/crd/bases/actions.github.com_ephemeralrunnersets.yaml @@ -21,6 +21,18 @@ spec: - jsonPath: .status.currentReplicas name: CurrentReplicas type: integer + - jsonPath: .status.pendingEphemeralRunners + name: Pending Runners + type: integer + - jsonPath: .status.runningEphemeralRunners + name: Running Runners + type: integer + - jsonPath: .status.finishedEphemeralRunners + name: Finished Runners + type: integer + - jsonPath: .status.deletingEphemeralRunners + name: Deleting Runners + type: integer name: v1alpha1 schema: openAPIV3Schema: @@ -4296,6 +4308,14 @@ spec: currentReplicas: description: CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet. 
type: integer + failedEphemeralRunners: + type: integer + pendingEphemeralRunners: + type: integer + runningEphemeralRunners: + type: integer + required: + - currentReplicas type: object type: object served: true diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index c7e95201e5..b279e084e2 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -238,6 +238,9 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners { if err := patchSubResource(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { obj.Status.CurrentRunners = latestRunnerSet.Status.CurrentReplicas + obj.Status.PendingEphemeralRunners = latestRunnerSet.Status.PendingEphemeralRunners + obj.Status.RunningEphemeralRunners = latestRunnerSet.Status.RunningEphemeralRunners + obj.Status.FailedEphemeralRunners = latestRunnerSet.Status.FailedEphemeralRunners }); err != nil { log.Error(err, "Failed to update autoscaling runner set status with current runner count") return ctrl.Result{}, err diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index aa2ae57d30..2a5fd7803c 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -157,23 +157,6 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") Expect(len(runnerSetList.Items)).To(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created") 
- runnerSet := runnerSetList.Items[0] - statusUpdate := runnerSet.DeepCopy() - statusUpdate.Status.CurrentReplicas = 100 - err = k8sClient.Status().Patch(ctx, statusUpdate, client.MergeFrom(&runnerSet)) - Expect(err).NotTo(HaveOccurred(), "failed to patch EphemeralRunnerSet status") - - Eventually( - func() (int, error) { - updated := new(v1alpha1.AutoscalingRunnerSet) - err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated) - if err != nil { - return 0, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err) - } - return updated.Status.CurrentRunners, nil - }, - autoscalingRunnerSetTestTimeout, - autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(100), "AutoScalingRunnerSet status should be updated") }) }) @@ -398,9 +381,75 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { return updated.Annotations[runnerScaleSetRunnerGroupNameKey], nil }, autoscalingRunnerSetTestTimeout, - autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the runner group in its annotation") + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the runner group in its annotation") }) }) + + It("Should update Status on EphemeralRunnerSet status Update", func() { + ars := new(v1alpha1.AutoscalingRunnerSet) + Eventually( + func() (bool, error) { + err := k8sClient.Get( + ctx, + client.ObjectKey{ + Name: autoscalingRunnerSet.Name, + Namespace: autoscalingRunnerSet.Namespace, + }, + ars, + ) + if err != nil { + return false, err + } + return true, nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeTrue(), "AutoscalingRunnerSet should be created") + + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) + Eventually(func() (int, error) { + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(ars.Namespace)) + if err != nil { + return 0, err + 
} + return len(runnerSetList.Items), nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(1), "Failed to fetch runner set list") + + runnerSet := runnerSetList.Items[0] + statusUpdate := runnerSet.DeepCopy() + statusUpdate.Status.CurrentReplicas = 6 + statusUpdate.Status.FailedEphemeralRunners = 1 + statusUpdate.Status.RunningEphemeralRunners = 2 + statusUpdate.Status.PendingEphemeralRunners = 3 + + desiredStatus := v1alpha1.AutoscalingRunnerSetStatus{ + CurrentRunners: statusUpdate.Status.CurrentReplicas, + State: "", + PendingEphemeralRunners: statusUpdate.Status.PendingEphemeralRunners, + RunningEphemeralRunners: statusUpdate.Status.RunningEphemeralRunners, + FailedEphemeralRunners: statusUpdate.Status.FailedEphemeralRunners, + } + + err := k8sClient.Status().Patch(ctx, statusUpdate, client.MergeFrom(&runnerSet)) + Expect(err).NotTo(HaveOccurred(), "Failed to patch runner set status") + + Eventually( + func() (v1alpha1.AutoscalingRunnerSetStatus, error) { + updated := new(v1alpha1.AutoscalingRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated) + if err != nil { + return v1alpha1.AutoscalingRunnerSetStatus{}, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err) + } + return updated.Status, nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(desiredStatus), "AutoScalingRunnerSet status should be updated") + }) }) var _ = Describe("Test AutoScalingController updates", func() { diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index 0db5840976..27a8a22704 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -200,11 +200,18 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx 
context.Context, req ctrl.R } } + desiredStatus := v1alpha1.EphemeralRunnerSetStatus{ + CurrentReplicas: total, + PendingEphemeralRunners: len(pendingEphemeralRunners), + RunningEphemeralRunners: len(runningEphemeralRunners), + FailedEphemeralRunners: len(failedEphemeralRunners), + } + // Update the status if needed. - if ephemeralRunnerSet.Status.CurrentReplicas != total { + if ephemeralRunnerSet.Status != desiredStatus { log.Info("Updating status with current runners count", "count", total) if err := patchSubResource(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { - obj.Status.CurrentReplicas = total + obj.Status = desiredStatus }); err != nil { log.Error(err, "Failed to update status with current runners count") return ctrl.Result{}, err diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index d51698ab09..4459a3f315 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -559,6 +559,181 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "0 EphemeralRunner should be created") }) + + It("Should update status on Ephemeral Runner state changes", func() { + created := new(actionsv1alpha1.EphemeralRunnerSet) + Eventually( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(Succeed(), "EphemeralRunnerSet should be created") + + // Scale up the EphemeralRunnerSet + updated := created.DeepCopy() + updated.Spec.Replicas = 3 + err := k8sClient.Update(ctx, updated) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet replica count") + + runnerList 
:= new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (bool, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return false, err + } + + if len(runnerList.Items) != 3 { + return false, err + } + + var pendingOriginal *v1alpha1.EphemeralRunner + var runningOriginal *v1alpha1.EphemeralRunner + var failedOriginal *v1alpha1.EphemeralRunner + var empty []*v1alpha1.EphemeralRunner + for _, runner := range runnerList.Items { + switch runner.Status.RunnerId { + case 101: + pendingOriginal = runner.DeepCopy() + case 102: + runningOriginal = runner.DeepCopy() + case 103: + failedOriginal = runner.DeepCopy() + default: + empty = append(empty, runner.DeepCopy()) + } + } + + refetch := false + if pendingOriginal == nil { // if NO pending + refetch = true + pendingOriginal = empty[0] + empty = empty[1:] + + pending := pendingOriginal.DeepCopy() + pending.Status.RunnerId = 101 + pending.Status.Phase = corev1.PodPending + + err = k8sClient.Status().Patch(ctx, pending, client.MergeFrom(pendingOriginal)) + if err != nil { + return false, err + } + } + + if runningOriginal == nil { // if NO running + refetch = true + runningOriginal = empty[0] + empty = empty[1:] + running := runningOriginal.DeepCopy() + running.Status.RunnerId = 102 + running.Status.Phase = corev1.PodRunning + + err = k8sClient.Status().Patch(ctx, running, client.MergeFrom(runningOriginal)) + if err != nil { + return false, err + } + } + + if failedOriginal == nil { // if NO failed + refetch = true + failedOriginal = empty[0] + + failed := pendingOriginal.DeepCopy() + failed.Status.RunnerId = 103 + failed.Status.Phase = corev1.PodFailed + + err = k8sClient.Status().Patch(ctx, failed, client.MergeFrom(failedOriginal)) + if err != nil { + return false, err + } + } + + return !refetch, nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeTrue(), "Failed to eventually update to one pending, one running 
and one failed") + + desiredStatus := v1alpha1.EphemeralRunnerSetStatus{ + CurrentReplicas: 3, + PendingEphemeralRunners: 1, + RunningEphemeralRunners: 1, + FailedEphemeralRunners: 1, + } + Eventually( + func() (v1alpha1.EphemeralRunnerSetStatus, error) { + updated := new(v1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated) + if err != nil { + return v1alpha1.EphemeralRunnerSetStatus{}, err + } + return updated.Status, nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(desiredStatus), "Status is not eventually updated to the desired one") + + updated = new(v1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated) + Expect(err).NotTo(HaveOccurred(), "Failed to fetch ephemeral runner set") + + updatedOriginal := updated.DeepCopy() + updated.Spec.Replicas = 0 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(updatedOriginal)) + Expect(err).NotTo(HaveOccurred(), "Failed to patch ephemeral runner set with 0 replicas") + + Eventually( + func() (int, error) { + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(1), "Failed to eventually scale down") + + desiredStatus = v1alpha1.EphemeralRunnerSetStatus{ + CurrentReplicas: 1, + PendingEphemeralRunners: 0, + RunningEphemeralRunners: 0, + FailedEphemeralRunners: 1, + } + + Eventually( + func() (v1alpha1.EphemeralRunnerSetStatus, error) { + updated := new(v1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated) + 
if err != nil { + return v1alpha1.EphemeralRunnerSetStatus{}, err + } + return updated.Status, nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(desiredStatus), "Status is not eventually updated to the desired one") + + err = k8sClient.Delete(ctx, &runnerList.Items[0]) + Expect(err).To(BeNil(), "Failed to delete failed ephemeral runner") + + desiredStatus = v1alpha1.EphemeralRunnerSetStatus{} // empty + Eventually( + func() (v1alpha1.EphemeralRunnerSetStatus, error) { + updated := new(v1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, updated) + if err != nil { + return v1alpha1.EphemeralRunnerSetStatus{}, err + } + return updated.Status, nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(desiredStatus), "Status is not eventually updated to the desired one") + }) }) }) @@ -821,12 +996,13 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0])) Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status") - updatedRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) - err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, updatedRunnerSet) + runnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, runnerSet) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + updatedRunnerSet := runnerSet.DeepCopy() updatedRunnerSet.Spec.Replicas = 0 - err = k8sClient.Update(ctx, updatedRunnerSet) + err = k8sClient.Patch(ctx, updatedRunnerSet, client.MergeFrom(runnerSet)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") Eventually( From 
6e81d95a7b5511e43547e57a527795a5c58e8b93 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Mon, 13 Mar 2023 12:44:54 -0400 Subject: [PATCH 133/561] Create separate chart validation workflow for gha-* charts. (#2393) Co-authored-by: Nikola Jokic --- .github/workflows/validate-chart.yaml | 4 + .github/workflows/validate-gha-chart.yaml | 134 ++++++++++++++++++ charts/.ci/ct-config-gha.yaml | 9 ++ charts/.ci/ct-config.yaml | 2 - .../ci/ci-values.yaml | 5 + charts/gha-runner-scale-set/ci/ci-values.yaml | 2 +- 6 files changed, 153 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/validate-gha-chart.yaml create mode 100644 charts/.ci/ct-config-gha.yaml create mode 100644 charts/gha-runner-scale-set-controller/ci/ci-values.yaml diff --git a/.github/workflows/validate-chart.yaml b/.github/workflows/validate-chart.yaml index 61ade607eb..5475649cbf 100644 --- a/.github/workflows/validate-chart.yaml +++ b/.github/workflows/validate-chart.yaml @@ -9,12 +9,16 @@ on: - '.github/workflows/validate-chart.yaml' - '!charts/actions-runner-controller/docs/**' - '!**.md' + - '!charts/gha-runner-scale-set-controller/**' + - '!charts/gha-runner-scale-set/**' push: paths: - 'charts/**' - '.github/workflows/validate-chart.yaml' - '!charts/actions-runner-controller/docs/**' - '!**.md' + - '!charts/gha-runner-scale-set-controller/**' + - '!charts/gha-runner-scale-set/**' workflow_dispatch: env: KUBE_SCORE_VERSION: 1.10.0 diff --git a/.github/workflows/validate-gha-chart.yaml b/.github/workflows/validate-gha-chart.yaml new file mode 100644 index 0000000000..0d54f6e27c --- /dev/null +++ b/.github/workflows/validate-gha-chart.yaml @@ -0,0 +1,134 @@ +name: Validate Helm Chart (gha-runner-scale-set-controller and gha-runner-scale-set) + +on: + pull_request: + branches: + - master + paths: + - 'charts/**' + - '.github/workflows/validate-gha-chart.yaml' + - '!charts/actions-runner-controller/**' + - '!**.md' + push: + paths: + - 'charts/**' + - 
'.github/workflows/validate-gha-chart.yaml' + - '!charts/actions-runner-controller/**' + - '!**.md' + workflow_dispatch: +env: + KUBE_SCORE_VERSION: 1.16.1 + HELM_VERSION: v3.8.0 + +permissions: + contents: read + +jobs: + validate-chart: + name: Lint Chart + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set up Helm + # Using https://github.com/Azure/setup-helm/releases/tag/v3.5 + uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 + with: + version: ${{ env.HELM_VERSION }} + + - name: Set up kube-score + run: | + wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score + chmod 755 kube-score + + - name: Kube-score generated manifests + run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - + --ignore-test pod-networkpolicy + --ignore-test deployment-has-poddisruptionbudget + --ignore-test deployment-has-host-podantiaffinity + --ignore-test container-security-context + --ignore-test pod-probes + --ignore-test container-image-tag + --enable-optional-test container-security-context-privileged + --enable-optional-test container-security-context-readonlyrootfilesystem + + # python is a requirement for the chart-testing action below (supports yamllint among other tests) + - uses: actions/setup-python@v4 + with: + python-version: '3.7' + + - name: Set up chart-testing + uses: helm/chart-testing-action@v2.3.1 + + - name: Set up latest version chart-testing + run: | + echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list + sudo apt update + sudo apt install goreleaser + git clone https://github.com/helm/chart-testing + cd chart-testing + unset CT_CONFIG_DIR + goreleaser build --clean --skip-validate + ./dist/chart-testing_linux_amd64_v1/ct version + echo 'Adding ct directory to PATH...' 
+ echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH" + echo 'Setting CT_CONFIG_DIR...' + echo "CT_CONFIG_DIR=$RUNNER_TEMP/chart-testing/etc" >> "$GITHUB_ENV" + working-directory: ${{ runner.temp }} + + - name: Run chart-testing (list-changed) + id: list-changed + run: | + ct version + changed=$(ct list-changed --config charts/.ci/ct-config-gha.yaml) + if [[ -n "$changed" ]]; then + echo "::set-output name=changed::true" + fi + + - name: Run chart-testing (lint) + run: | + ct lint --config charts/.ci/ct-config-gha.yaml + + - name: Set up docker buildx + uses: docker/setup-buildx-action@v2 + if: steps.list-changed.outputs.changed == 'true' + with: + version: latest + + - name: Build controller image + uses: docker/build-push-action@v3 + if: steps.list-changed.outputs.changed == 'true' + with: + file: Dockerfile + platforms: linux/amd64 + load: true + build-args: | + DOCKER_IMAGE_NAME=test-arc + VERSION=dev + tags: | + test-arc:dev + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Create kind cluster + uses: helm/kind-action@v1.4.0 + if: steps.list-changed.outputs.changed == 'true' + with: + cluster_name: chart-testing + + - name: Load image into cluster + if: steps.list-changed.outputs.changed == 'true' + run: | + export DOCKER_IMAGE_NAME=test-arc + export VERSION=dev + export IMG_RESULT=load + make docker-buildx + kind load docker-image test-arc:dev --name chart-testing + + - name: Run chart-testing (install) + if: steps.list-changed.outputs.changed == 'true' + run: | + ct install --config charts/.ci/ct-config-gha.yaml diff --git a/charts/.ci/ct-config-gha.yaml b/charts/.ci/ct-config-gha.yaml new file mode 100644 index 0000000000..baf8bc4323 --- /dev/null +++ b/charts/.ci/ct-config-gha.yaml @@ -0,0 +1,9 @@ +# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow +lint-conf: charts/.ci/lint-config.yaml +chart-repos: + - jetstack=https://charts.jetstack.io 
+check-version-increment: false # Disable checking that the chart version has been bumped +charts: +- charts/gha-runner-scale-set-controller +- charts/gha-runner-scale-set +skip-clean-up: true diff --git a/charts/.ci/ct-config.yaml b/charts/.ci/ct-config.yaml index 64c8d1bb2a..55ebad54a7 100644 --- a/charts/.ci/ct-config.yaml +++ b/charts/.ci/ct-config.yaml @@ -5,5 +5,3 @@ chart-repos: check-version-increment: false # Disable checking that the chart version has been bumped charts: - charts/actions-runner-controller -- charts/gha-runner-scale-set-controller -- charts/gha-runner-scale-set \ No newline at end of file diff --git a/charts/gha-runner-scale-set-controller/ci/ci-values.yaml b/charts/gha-runner-scale-set-controller/ci/ci-values.yaml new file mode 100644 index 0000000000..e8e805428b --- /dev/null +++ b/charts/gha-runner-scale-set-controller/ci/ci-values.yaml @@ -0,0 +1,5 @@ +# Set the following to dummy values. +# This is only useful in CI +image: + repository: test-arc + tag: dev diff --git a/charts/gha-runner-scale-set/ci/ci-values.yaml b/charts/gha-runner-scale-set/ci/ci-values.yaml index 3497fcab8c..00b9844e38 100644 --- a/charts/gha-runner-scale-set/ci/ci-values.yaml +++ b/charts/gha-runner-scale-set/ci/ci-values.yaml @@ -3,4 +3,4 @@ githubConfigUrl: https://github.com/actions/actions-runner-controller githubConfigSecret: - github_token: test \ No newline at end of file + github_token: test From 6774c320828b916e499be1d8b1d6ae410589afcb Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 14 Mar 2023 09:00:07 -0400 Subject: [PATCH 134/561] Update E2E test workflow. 
(#2395) --- .github/actions/e2e-arc-test/action.yaml | 45 ------ .github/actions/setup-arc-e2e/action.yaml | 65 ++++++++ .github/workflows/e2e-test-linux-vm.yaml | 139 ++++++++++++++---- .../ephemeralrunnerset_controller_test.go | 7 +- test_e2e_arc/arc_jobs_test.go | 6 +- 5 files changed, 179 insertions(+), 83 deletions(-) delete mode 100644 .github/actions/e2e-arc-test/action.yaml create mode 100644 .github/actions/setup-arc-e2e/action.yaml diff --git a/.github/actions/e2e-arc-test/action.yaml b/.github/actions/e2e-arc-test/action.yaml deleted file mode 100644 index 6573910202..0000000000 --- a/.github/actions/e2e-arc-test/action.yaml +++ /dev/null @@ -1,45 +0,0 @@ -name: 'E2E ARC Test Action' -description: 'Includes common arc installation, setup and test file run' - -inputs: - github-token: - description: 'JWT generated with Github App inputs' - required: true - config-url: - description: "URL of the repo, org or enterprise where the runner scale sets will be registered" - required: true - docker-image-repo: - description: "Local docker image repo for testing" - required: true - docker-image-tag: - description: "Tag of ARC Docker image for testing" - required: true - -runs: - using: "composite" - steps: - - name: Install ARC - run: helm install arc --namespace "arc-systems" --create-namespace --set image.tag=${{ inputs.docker-image-tag }} --set image.repository=${{ inputs.docker-image-repo }} ./charts/gha-runner-scale-set-controller - shell: bash - - name: Get datetime - # We are using this value further in the runner installation to avoid runner name collision that are a risk with hard coded values. - # A datetime including the 3 nanoseconds are a good option for this and also adds to readability and runner sorting if needed. 
- run: echo "DATE_TIME=$(date +'%Y-%m-%d-%H-%M-%S-%3N')" >> $GITHUB_ENV - shell: bash - - name: Install runners - run: | - helm install "arc-runner-${{ env.DATE_TIME }}" \ - --namespace "arc-runners" \ - --create-namespace \ - --set githubConfigUrl="${{ inputs.config-url }}" \ - --set githubConfigSecret.github_token="${{ inputs.github-token }}" \ - ./charts/gha-runner-scale-set \ - --debug - kubectl get pods -A - shell: bash - - name: Test ARC scales pods up and down - run: | - export GITHUB_TOKEN="${{ inputs.github-token }}" - export DATE_TIME="${{ env.DATE_TIME }}" - go test ./test_e2e_arc -v - shell: bash diff --git a/.github/actions/setup-arc-e2e/action.yaml b/.github/actions/setup-arc-e2e/action.yaml new file mode 100644 index 0000000000..1f533b16ba --- /dev/null +++ b/.github/actions/setup-arc-e2e/action.yaml @@ -0,0 +1,65 @@ +name: 'Setup ARC E2E Test Action' +description: 'Build controller image, create kind cluster, load the image, and exchange ARC configure token.' + +inputs: + github-app-id: + description: 'GitHub App Id for exchange access token' + required: true + github-app-pk: + description: "GitHub App private key for exchange access token" + required: true + github-app-org: + description: 'The organization the GitHub App has installed on' + required: true + docker-image-name: + description: "Local docker image name for building" + required: true + docker-image-tag: + description: "Tag of ARC Docker image for building" + required: true + +outputs: + token: + description: 'Token to use for configure ARC' + value: ${{steps.config-token.outputs.token}} + +runs: + using: "composite" + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + with: + # Pinning v0.9.1 for Buildx and BuildKit v0.10.6 + # BuildKit v0.11 which has a bug causing intermittent + # failures pushing images to GHCR + version: v0.9.1 + driver-opts: image=moby/buildkit:v0.10.6 + + - name: Build controller image + uses: docker/build-push-action@v3 + with: + file: 
Dockerfile + platforms: linux/amd64 + load: true + build-args: | + DOCKER_IMAGE_NAME=${{inputs.docker-image-name}} + VERSION=${{inputs.docker-image-tag}} + tags: | + ${{inputs.docker-image-name}}:${{inputs.docker-image-tag}} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Create Kind cluster and load image + shell: bash + run: | + PATH=$(go env GOPATH)/bin:$PATH + kind create cluster --name arc-e2e + kind load docker-image ${{inputs.docker-image-name}}:${{inputs.docker-image-tag}} --name arc-e2e + + - name: Get configure token + id: config-token + uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db + with: + application_id: ${{ inputs.github-app-id }} + application_private_key: ${{ inputs.github-app-pk }} + organization: ${{ inputs.github-app-org }} \ No newline at end of file diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 08f575b718..88ba0dd1ee 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -5,47 +5,122 @@ on: branches: - master pull_request: + branches: + - master workflow_dispatch: + inputs: + target_org: + description: The org of the test repository. + required: true + default: actions-runner-controller + target_repo: + description: The repository to install the ARC. 
+ required: true + default: arc_e2e_test_dummy env: TARGET_ORG: actions-runner-controller - CLUSTER_NAME: e2e-test - RUNNER_VERSION: 2.302.1 - IMAGE_REPO: "test/test-image" + TARGET_REPO: arc_e2e_test_dummy + IMAGE_NAME: "arc-test-image" + IMAGE_VERSION: "dev" jobs: - setup-steps: - runs-on: [ubuntu-latest] + default-setup: + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - name: Add env variables + + - name: Resolve inputs + id: resolved_inputs run: | - TAG=$(echo "0.0.$GITHUB_SHA") - echo "TAG=$TAG" >> $GITHUB_ENV - echo "IMAGE=$IMAGE_REPO:$TAG" >> $GITHUB_ENV - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + TARGET_ORG="${{env.TARGET_ORG}}" + TARGET_REPO="${{env.TARGET_REPO}}" + if [ ! -z "${{inputs.target_org}}" ]; then + TARGET_ORG="${{inputs.target_org}}" + fi + if [ ! -z "${{inputs.target_repo}}" ]; then + TARGET_REPO="${{inputs.target_repo}}" + fi + echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT + echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + + - uses: ./.github/actions/setup-arc-e2e + id: setup with: - version: latest - - name: Docker Build Test Image + github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} + github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} + github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} + docker-image-name: ${{env.IMAGE_NAME}} + docker-image-tag: ${{env.IMAGE_VERSION}} + + - name: Install gha-runner-scale-set-controller + id: install_arc_controller run: | - DOCKER_CLI_EXPERIMENTAL=enabled DOCKER_BUILDKIT=1 docker buildx build --build-arg RUNNER_VERSION=$RUNNER_VERSION --build-arg TAG=$TAG -t $IMAGE . 
--load - - name: Create Kind cluster + helm install arc \ + --namespace "arc-systems" \ + --create-namespace \ + --set image.repository=${{ env.IMAGE_NAME }} \ + --set image.tag=${{ env.IMAGE_VERSION }} \ + ./charts/gha-runner-scale-set-controller \ + --debug + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller + kubectl get pod -n arc-systems + kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems + + - name: Install gha-runner-scale-set + id: install_arc run: | - PATH=$(go env GOPATH)/bin:$PATH - kind create cluster --name $CLUSTER_NAME - - name: Load Image to Kind Cluster - run: kind load docker-image $IMAGE --name $CLUSTER_NAME - - name: Get Token - id: get_workflow_token - uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db - with: - application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} - application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} - organization: ${{ env.TARGET_ORG }} - - uses: ./.github/actions/e2e-arc-test - with: - github-token: ${{ steps.get_workflow_token.outputs.token }} - config-url: "https://github.com/actions-runner-controller/arc_e2e_test_dummy" - docker-image-repo: $IMAGE_REPO - docker-image-tag: $TAG + ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + helm install "$ARC_NAME" \ + --namespace "arc-runners" \ + --create-namespace \ + --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set 
githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ + ./charts/gha-runner-scale-set \ + --debug + echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl get pod -n arc-systems + + - name: Test ARC scales pods up and down + run: | + export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" + export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" + go test ./test_e2e_arc -v + + - name: Uninstall gha-runner-scale-set + if: always() && steps.install_arc.outcome == 'success' + run: | + helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners + kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} + + - name: Dump gha-runner-scale-set-controller logs + if: always() && steps.install_arc_controller.outcome == 'success' + run: | + kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems \ No newline at end of file diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index 4459a3f315..fecf1b009e 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -1141,12 +1141,13 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func( err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0])) Expect(err).NotTo(HaveOccurred(), "failed to 
update ephemeral runner status") - updatedRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) - err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, updatedRunnerSet) + currentRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, currentRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + updatedRunnerSet := currentRunnerSet.DeepCopy() updatedRunnerSet.Spec.Replicas = 0 - err = k8sClient.Update(ctx, updatedRunnerSet) + err = k8sClient.Patch(ctx, updatedRunnerSet, client.MergeFrom(currentRunnerSet)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") // wait for server to be called diff --git a/test_e2e_arc/arc_jobs_test.go b/test_e2e_arc/arc_jobs_test.go index 02db92f67c..8b3ca9f89b 100644 --- a/test_e2e_arc/arc_jobs_test.go +++ b/test_e2e_arc/arc_jobs_test.go @@ -92,11 +92,11 @@ func TestARCJobs(t *testing.T) { ) t.Run("Get available pods during job run", func(t *testing.T) { c := http.Client{} - dateTime := os.Getenv("DATE_TIME") + targetArcName := os.Getenv("ARC_NAME") // We are triggering manually a workflow that already exists in the repo. // This workflow is expected to spin up a number of runner pods matching the runners value set in podCountsByType. 
- url := "https://api.github.com/repos/actions-runner-controller/arc_e2e_test_dummy/actions/workflows/e2e-test-dispatch-workflow.yaml/dispatches" - jsonStr := []byte(fmt.Sprintf(`{"ref":"main", "inputs":{"date_time":"%s"}}`, dateTime)) + url := "https://api.github.com/repos/actions-runner-controller/arc_e2e_test_dummy/actions/workflows/arc-test-workflow.yaml/dispatches" + jsonStr := []byte(fmt.Sprintf(`{"ref":"main", "inputs":{"arc_name":"%s"}}`, targetArcName)) req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr)) if err != nil { From 0d6349e7d8ce4d829bc2874e79ea6b73926a1946 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Tue, 14 Mar 2023 14:12:53 +0100 Subject: [PATCH 135/561] Add gha-runner-scale-set-controller canary build (#2405) --- .github/workflows/publish-canary.yaml | 77 ++++++++++++++++++++++----- 1 file changed, 64 insertions(+), 13 deletions(-) diff --git a/.github/workflows/publish-canary.yaml b/.github/workflows/publish-canary.yaml index 3939030738..d84e78e1c6 100644 --- a/.github/workflows/publish-canary.yaml +++ b/.github/workflows/publish-canary.yaml @@ -8,35 +8,47 @@ on: - master paths-ignore: - '**.md' + - '.github/actions/**' - '.github/ISSUE_TEMPLATE/**' - - '.github/workflows/validate-chart.yaml' - - '.github/workflows/publish-chart.yaml' + - '.github/workflows/e2e-test-dispatch-workflow.yaml' + - '.github/workflows/e2e-test-linux-vm.yaml' - '.github/workflows/publish-arc.yaml' - - '.github/workflows/runners.yaml' - - '.github/workflows/validate-entrypoint.yaml' - - '.github/renovate.*' + - '.github/workflows/publish-chart.yaml' + - '.github/workflows/publish-runner-scale-set.yaml' + - '.github/workflows/release-runners.yaml' + - '.github/workflows/run-codeql.yaml' + - '.github/workflows/run-first-interaction.yaml' + - '.github/workflows/run-stale.yaml' + - '.github/workflows/update-runners.yaml' + - '.github/workflows/validate-arc.yaml' + - '.github/workflows/validate-chart.yaml' + - 
'.github/workflows/validate-gha-chart.yaml' + - '.github/workflows/validate-runners.yaml' + - '.github/dependabot.yml' + - '.github/RELEASE_NOTE_TEMPLATE.md' - 'runner/**' - '.gitignore' - 'PROJECT' - 'LICENSE' - 'Makefile' -env: - # Safeguard to prevent pushing images to registeries after build - PUSH_TO_REGISTRIES: true - TARGET_ORG: actions-runner-controller - TARGET_REPO: actions-runner-controller - # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps permissions: contents: read + packages: write + +env: + # Safeguard to prevent pushing images to registeries after build + PUSH_TO_REGISTRIES: true jobs: - canary-build: - name: Build and Publish Canary Image + legacy-canary-build: + name: Build and Publish Legacy Canary Image runs-on: ubuntu-latest env: DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + TARGET_ORG: actions-runner-controller + TARGET_REPO: actions-runner-controller steps: - name: Checkout uses: actions/checkout@v3 @@ -68,3 +80,42 @@ jobs: echo "" >> $GITHUB_STEP_SUMMARY echo "**Status:**" >> $GITHUB_STEP_SUMMARY echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY + + canary-build: + name: Build and Publish gha-runner-scale-set-controller Canary Image + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Normalization is needed because upper case characters are not allowed in the repository name + # and the short sha is needed for image tagging + - name: Resolve parameters + id: resolve_parameters + run: | + echo "INFO: Resolving short sha" + echo "short_sha=$(git rev-parse --short ${{ github.ref }})" >> $GITHUB_OUTPUT + echo "INFO: Normalizing 
repository name (lowercase)" + echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT + + # Unstable builds - run at your own risk + - name: Build and Push + uses: docker/build-push-action@v3 + with: + context: . + file: ./Dockerfile + platforms: linux/amd64,linux/arm64 + build-args: VERSION=canary-"${{ github.ref }}" + push: ${{ env.PUSH_TO_REGISTRIES }} + tags: | + ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary + ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary-"${{ steps.resolve_parameters.outputs.short_sha }}" + cache-from: type=gha + cache-to: type=gha,mode=max \ No newline at end of file From ed5da94462a620b8d0dccc85cc3e7317656ccc5b Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Tue, 14 Mar 2023 14:13:25 +0100 Subject: [PATCH 136/561] Prevent releases on wrong tag name (#2406) --- .github/workflows/publish-arc.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/publish-arc.yaml b/.github/workflows/publish-arc.yaml index 4c3a8075cd..4c18c255d6 100644 --- a/.github/workflows/publish-arc.yaml +++ b/.github/workflows/publish-arc.yaml @@ -29,6 +29,10 @@ jobs: release-controller: name: Release runs-on: ubuntu-latest + # gha-runner-scale-set has its own release workflow. + # We don't want to publish a new actions-runner-controller image + # we release gha-runner-scale-set. 
+ if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }} steps: - name: Checkout uses: actions/checkout@v3 From 95bdf3081240ba60b67d395276a0bde74e0594c7 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Tue, 14 Mar 2023 14:22:38 +0100 Subject: [PATCH 137/561] Add docker buildx pre-requisites (#2408) --- .github/workflows/publish-canary.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/publish-canary.yaml b/.github/workflows/publish-canary.yaml index d84e78e1c6..e0c4c6b55f 100644 --- a/.github/workflows/publish-canary.yaml +++ b/.github/workflows/publish-canary.yaml @@ -104,6 +104,14 @@ jobs: echo "short_sha=$(git rev-parse --short ${{ github.ref }})" >> $GITHUB_OUTPUT echo "INFO: Normalizing repository name (lowercase)" echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + with: + version: latest # Unstable builds - run at your own risk - name: Build and Push From b9a9ebffdf7f5e5a8ba8f43bcdf97c9a1de54933 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 14 Mar 2023 09:23:14 -0400 Subject: [PATCH 138/561] Remove list/watch secrets permission from the manager cluster role. 
(#2276) --- .../templates/manager_role.yaml | 2 -- .../autoscalinglistener_controller.go | 11 +++++------ .../actions.github.com/ephemeralrunner_controller.go | 5 ++--- main.go | 6 ++++++ 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/charts/gha-runner-scale-set-controller/templates/manager_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_role.yaml index e9457cfac2..e69d599f13 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_role.yaml @@ -133,8 +133,6 @@ rules: - create - delete - get - - list - - watch - update - apiGroups: - "" diff --git a/controllers/actions.github.com/autoscalinglistener_controller.go b/controllers/actions.github.com/autoscalinglistener_controller.go index e82fc8da1f..dec923345a 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller.go +++ b/controllers/actions.github.com/autoscalinglistener_controller.go @@ -86,7 +86,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl. } if !done { log.Info("Waiting for resources to be deleted before removing finalizer") - return ctrl.Result{}, nil + return ctrl.Result{Requeue: true}, nil } log.Info("Removing finalizer") @@ -204,7 +204,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl. return r.createRoleBindingForListener(ctx, autoscalingListener, listenerRole, serviceAccount, log) } - // Create a secret containing proxy config if specifiec + // Create a secret containing proxy config if specified if autoscalingListener.Spec.Proxy != nil { proxySecret := new(corev1.Secret) if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: proxyListenerSecretName(autoscalingListener)}, proxySecret); err != nil { @@ -299,7 +299,6 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error For(&v1alpha1.AutoscalingListener{}). Owns(&corev1.Pod{}). 
Owns(&corev1.ServiceAccount{}). - Owns(&corev1.Secret{}). Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)). Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)). WithEventFilter(predicate.ResourceVersionChangedPredicate{}). @@ -503,7 +502,7 @@ func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Con } logger.Info("Created listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name) - return ctrl.Result{}, nil + return ctrl.Result{Requeue: true}, nil } func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) { @@ -542,7 +541,7 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a logger.Info("Created listener proxy secret", "namespace", newProxySecret.Namespace, "name", newProxySecret.Name) - return ctrl.Result{}, nil + return ctrl.Result{Requeue: true}, nil } func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) { @@ -558,7 +557,7 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con } logger.Info("Updated listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash) - return ctrl.Result{}, nil + return ctrl.Result{Requeue: true}, nil } func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) { diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go index f697091b55..d656e28e57 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller.go +++ 
b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -107,7 +107,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ } if !done { log.Info("Waiting for ephemeral runner owned resources to be deleted") - return ctrl.Result{}, nil + return ctrl.Result{Requeue: true}, nil } done, err = r.cleanupContainerHooksResources(ctx, ephemeralRunner, log) @@ -643,7 +643,7 @@ func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1 } log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name) - return ctrl.Result{}, nil + return ctrl.Result{Requeue: true}, nil } // updateRunStatusFromPod is responsible for updating non-exiting statuses. @@ -792,7 +792,6 @@ func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&v1alpha1.EphemeralRunner{}). Owns(&corev1.Pod{}). - Owns(&corev1.Secret{}). WithEventFilter(predicate.ResourceVersionChangedPredicate{}). Named("ephemeral-runner-controller"). 
Complete(r) diff --git a/main.go b/main.go index fbf8b3ff43..d91ada775e 100644 --- a/main.go +++ b/main.go @@ -32,10 +32,12 @@ import ( "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/logging" "github.com/kelseyhightower/envconfig" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" // +kubebuilder:scaffold:imports ) @@ -160,6 +162,10 @@ func main() { Port: port, SyncPeriod: &syncPeriod, Namespace: namespace, + ClientDisableCacheFor: []client.Object{ + &corev1.Secret{}, + &corev1.ConfigMap{}, + }, }) if err != nil { log.Error(err, "unable to start manager") From f86f6cd76032986b4f275f83659a9b852ec4d140 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Tue, 14 Mar 2023 14:29:10 +0100 Subject: [PATCH 139/561] Fix canary image tag name (#2409) --- .github/workflows/publish-canary.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish-canary.yaml b/.github/workflows/publish-canary.yaml index e0c4c6b55f..984cd52325 100644 --- a/.github/workflows/publish-canary.yaml +++ b/.github/workflows/publish-canary.yaml @@ -124,6 +124,6 @@ jobs: push: ${{ env.PUSH_TO_REGISTRIES }} tags: | ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary - ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary-"${{ steps.resolve_parameters.outputs.short_sha }}" + ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary-${{ steps.resolve_parameters.outputs.short_sha }} cache-from: type=gha cache-to: type=gha,mode=max \ No newline at end of file From bba6361babd5b8f7c32422efa09e1354def46354 Mon Sep 17 00:00:00 2001 
From: Tingluo Huang Date: Tue, 14 Mar 2023 09:45:44 -0400 Subject: [PATCH 140/561] Delay role/rolebinding creation to gha-runner-scale-set installation time (#2363) --- .../templates/_helpers.tpl | 16 +- .../templates/deployment.yaml | 2 + ...er_role.yaml => manager_cluster_role.yaml} | 40 +-- ...yaml => manager_cluster_role_binding.yaml} | 4 +- .../templates/manager_listener_role.yaml | 40 +++ .../manager_listener_role_binding.yaml | 13 + .../tests/template_test.go | 92 +++++- .../templates/_helpers.tpl | 80 +++++ .../templates/manager_role.yaml | 59 ++++ .../templates/manager_role_binding.yaml | 13 + .../tests/template_test.go | 298 ++++++++++++++---- charts/gha-runner-scale-set/tests/values.yaml | 5 +- charts/gha-runner-scale-set/values.yaml | 10 + 13 files changed, 544 insertions(+), 128 deletions(-) rename charts/gha-runner-scale-set-controller/templates/{manager_role.yaml => manager_cluster_role.yaml} (79%) rename charts/gha-runner-scale-set-controller/templates/{manager_role_binding.yaml => manager_cluster_role_binding.yaml} (63%) create mode 100644 charts/gha-runner-scale-set-controller/templates/manager_listener_role.yaml create mode 100644 charts/gha-runner-scale-set-controller/templates/manager_listener_role_binding.yaml create mode 100644 charts/gha-runner-scale-set/templates/manager_role.yaml create mode 100644 charts/gha-runner-scale-set/templates/manager_role_binding.yaml diff --git a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl index ce3409fa04..ebef4a9b78 100644 --- a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl @@ -72,12 +72,20 @@ Create the name of the service account to use {{- end }} {{- end }} -{{- define "gha-runner-scale-set-controller.managerRoleName" -}} -{{- include "gha-runner-scale-set-controller.fullname" . 
}}-manager-role +{{- define "gha-runner-scale-set-controller.managerClusterRoleName" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-role {{- end }} -{{- define "gha-runner-scale-set-controller.managerRoleBinding" -}} -{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-rolebinding +{{- define "gha-runner-scale-set-controller.managerClusterRoleBinding" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-rolebinding +{{- end }} + +{{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-role +{{- end }} + +{{- define "gha-runner-scale-set-controller.managerListenerRoleBinding" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-rolebinding {{- end }} {{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}} diff --git a/charts/gha-runner-scale-set-controller/templates/deployment.yaml b/charts/gha-runner-scale-set-controller/templates/deployment.yaml index cae43f4e4c..f509aa67fe 100644 --- a/charts/gha-runner-scale-set-controller/templates/deployment.yaml +++ b/charts/gha-runner-scale-set-controller/templates/deployment.yaml @@ -5,6 +5,8 @@ metadata: namespace: {{ .Release.Namespace }} labels: {{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }} + actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }} + actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . 
}} spec: replicas: {{ default 1 .Values.replicaCount }} selector: diff --git a/charts/gha-runner-scale-set-controller/templates/manager_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml similarity index 79% rename from charts/gha-runner-scale-set-controller/templates/manager_role.yaml rename to charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml index e69d599f13..deb3c999c6 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "gha-runner-scale-set-controller.managerRoleName" . }} + name: {{ include "gha-runner-scale-set-controller.managerClusterRoleName" . }} rules: - apiGroups: - actions.github.com @@ -112,43 +112,13 @@ rules: resources: - pods verbs: - - create - - delete - - get - list - - patch - - update - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - get - - update - apiGroups: - "" resources: - serviceaccounts verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - "" - resources: - - configmaps - verbs: - list - watch - apiGroups: @@ -156,10 +126,6 @@ rules: resources: - rolebindings verbs: - - create - - delete - - get - - update - list - watch - apiGroups: @@ -167,9 +133,5 @@ rules: resources: - roles verbs: - - create - - delete - - get - - update - list - watch diff --git a/charts/gha-runner-scale-set-controller/templates/manager_role_binding.yaml b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role_binding.yaml similarity index 63% rename from charts/gha-runner-scale-set-controller/templates/manager_role_binding.yaml rename to charts/gha-runner-scale-set-controller/templates/manager_cluster_role_binding.yaml index 72549d6ad1..4ce8f9b868 100644 
--- a/charts/gha-runner-scale-set-controller/templates/manager_role_binding.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role_binding.yaml @@ -1,11 +1,11 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: {{ include "gha-runner-scale-set-controller.managerRoleBinding" . }} + name: {{ include "gha-runner-scale-set-controller.managerClusterRoleBinding" . }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: {{ include "gha-runner-scale-set-controller.managerRoleName" . }} + name: {{ include "gha-runner-scale-set-controller.managerClusterRoleName" . }} subjects: - kind: ServiceAccount name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }} diff --git a/charts/gha-runner-scale-set-controller/templates/manager_listener_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_listener_role.yaml new file mode 100644 index 0000000000..86a937776f --- /dev/null +++ b/charts/gha-runner-scale-set-controller/templates/manager_listener_role.yaml @@ -0,0 +1,40 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . 
}} + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - patch + - update +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - delete + - get + - patch + - update \ No newline at end of file diff --git a/charts/gha-runner-scale-set-controller/templates/manager_listener_role_binding.yaml b/charts/gha-runner-scale-set-controller/templates/manager_listener_role_binding.yaml new file mode 100644 index 0000000000..8a2f7f95a4 --- /dev/null +++ b/charts/gha-runner-scale-set-controller/templates/manager_listener_role_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "gha-runner-scale-set-controller.managerListenerRoleBinding" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . }} +subjects: +- kind: ServiceAccount + name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index fe4bf020bf..b954867ee8 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -147,7 +147,7 @@ func TestTemplate_NotCreateServiceAccount_ServiceAccountNotSet(t *testing.T) { assert.ErrorContains(t, err, "serviceAccount.name must be set if serviceAccount.create is false", "We should get an error because the default service account cannot be used") } -func TestTemplate_CreateManagerRole(t *testing.T) { +func TestTemplate_CreateManagerClusterRole(t *testing.T) { t.Parallel() // Path to the helm chart we will test @@ -162,17 +162,17 @@ func TestTemplate_CreateManagerRole(t *testing.T) { KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } - output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"}) + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role.yaml"}) - var managerRole rbacv1.ClusterRole - helm.UnmarshalK8SYaml(t, output, &managerRole) + var managerClusterRole rbacv1.ClusterRole + helm.UnmarshalK8SYaml(t, output, &managerClusterRole) - assert.Empty(t, managerRole.Namespace, "ClusterRole should not have a namespace") - assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRole.Name) - assert.Equal(t, 18, len(managerRole.Rules)) + assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace") + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name) + assert.Equal(t, 15, len(managerClusterRole.Rules)) } -func TestTemplate_ManagerRoleBinding(t *testing.T) { +func TestTemplate_ManagerClusterRoleBinding(t *testing.T) { t.Parallel() // Path to the 
helm chart we will test @@ -189,16 +189,74 @@ func TestTemplate_ManagerRoleBinding(t *testing.T) { KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } - output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"}) + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role_binding.yaml"}) - var managerRoleBinding rbacv1.ClusterRoleBinding - helm.UnmarshalK8SYaml(t, output, &managerRoleBinding) + var managerClusterRoleBinding rbacv1.ClusterRoleBinding + helm.UnmarshalK8SYaml(t, output, &managerClusterRoleBinding) - assert.Empty(t, managerRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace") - assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-rolebinding", managerRoleBinding.Name) - assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-role", managerRoleBinding.RoleRef.Name) - assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerRoleBinding.Subjects[0].Name) - assert.Equal(t, namespaceName, managerRoleBinding.Subjects[0].Namespace) + assert.Empty(t, managerClusterRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace") + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-rolebinding", managerClusterRoleBinding.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRoleBinding.RoleRef.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerClusterRoleBinding.Subjects[0].Name) + assert.Equal(t, namespaceName, managerClusterRoleBinding.Subjects[0].Namespace) +} + +func TestTemplate_CreateManagerListenerRole(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options 
:= &helm.Options{ + SetValues: map[string]string{}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_listener_role.yaml"}) + + var managerListenerRole rbacv1.Role + helm.UnmarshalK8SYaml(t, output, &managerListenerRole) + + assert.Equal(t, namespaceName, managerListenerRole.Namespace, "Role should have a namespace") + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRole.Name) + assert.Equal(t, 4, len(managerListenerRole.Rules)) + assert.Equal(t, "pods", managerListenerRole.Rules[0].Resources[0]) + assert.Equal(t, "pods/status", managerListenerRole.Rules[1].Resources[0]) + assert.Equal(t, "secrets", managerListenerRole.Rules[2].Resources[0]) + assert.Equal(t, "serviceaccounts", managerListenerRole.Rules[3].Resources[0]) +} + +func TestTemplate_ManagerListenerRoleBinding(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "serviceAccount.create": "true", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_listener_role_binding.yaml"}) + + var managerListenerRoleBinding rbacv1.RoleBinding + helm.UnmarshalK8SYaml(t, output, &managerListenerRoleBinding) + + assert.Equal(t, namespaceName, managerListenerRoleBinding.Namespace, "RoleBinding should have a namespace") + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-rolebinding", managerListenerRoleBinding.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", 
managerListenerRoleBinding.RoleRef.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerListenerRoleBinding.Subjects[0].Name) + assert.Equal(t, namespaceName, managerListenerRoleBinding.Subjects[0].Namespace) } func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { @@ -237,6 +295,8 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) + assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"]) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"]) assert.Equal(t, int32(1), *deployment.Spec.Replicas) diff --git a/charts/gha-runner-scale-set/templates/_helpers.tpl b/charts/gha-runner-scale-set/templates/_helpers.tpl index 8511de075b..208e42ea3d 100644 --- a/charts/gha-runner-scale-set/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set/templates/_helpers.tpl @@ -458,3 +458,83 @@ volumeMounts: {{- end }} {{- end }} {{- end }} + +{{- define "gha-runner-scale-set.managerRoleName" -}} +{{- include "gha-runner-scale-set.fullname" . }}-manager-role +{{- end }} + +{{- define "gha-runner-scale-set.managerRoleBinding" -}} +{{- include "gha-runner-scale-set.fullname" . 
}}-manager-role-binding +{{- end }} + +{{- define "gha-runner-scale-set.managerServiceAccountName" -}} +{{- $searchControllerDeployment := 1 }} +{{- if .Values.controllerServiceAccount }} + {{- if .Values.controllerServiceAccount.name }} + {{- $searchControllerDeployment = 0 }} +{{- .Values.controllerServiceAccount.name }} + {{- end }} +{{- end }} +{{- if eq $searchControllerDeployment 1 }} + {{- $counter := 0 }} + {{- $controllerDeployment := dict }} + {{- $managerServiceAccountName := "" }} + {{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }} + {{- range $key, $val := $deployment.metadata.labels }} + {{- if and (eq $key "app.kubernetes.io/part-of") (eq $val "gha-runner-scale-set-controller") }} + {{- $counter = add $counter 1 }} + {{- $controllerDeployment = $deployment }} + {{- end }} + {{- end }} + {{- end }} + {{- if lt $counter 1 }} + {{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} + {{- end }} + {{- if gt $counter 1 }} + {{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} + {{- end }} + {{- with $controllerDeployment.metadata }} + {{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }} + {{- end }} + {{- if eq $managerServiceAccountName "" }} + {{- fail "No service account name found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." 
}} + {{- end }} +{{- $managerServiceAccountName }} +{{- end }} +{{- end }} + +{{- define "gha-runner-scale-set.managerServiceAccountNamespace" -}} +{{- $searchControllerDeployment := 1 }} +{{- if .Values.controllerServiceAccount }} + {{- if .Values.controllerServiceAccount.namespace }} + {{- $searchControllerDeployment = 0 }} +{{- .Values.controllerServiceAccount.namespace }} + {{- end }} +{{- end }} +{{- if eq $searchControllerDeployment 1 }} + {{- $counter := 0 }} + {{- $controllerDeployment := dict }} + {{- $managerServiceAccountNamespace := "" }} + {{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }} + {{- range $key, $val := $deployment.metadata.labels }} + {{- if and (eq $key "app.kubernetes.io/part-of") (eq $val "gha-runner-scale-set-controller") }} + {{- $counter = add $counter 1 }} + {{- $controllerDeployment = $deployment }} + {{- end }} + {{- end }} + {{- end }} + {{- if lt $counter 1 }} + {{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller), consider setting controllerServiceAccount.name to be explicit if you think the discovery is wrong." }} + {{- end }} + {{- if gt $counter 1 }} + {{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller), consider setting controllerServiceAccount.name to be explicit if you think the discovery is wrong." }} + {{- end }} + {{- with $controllerDeployment.metadata }} + {{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }} + {{- end }} + {{- if eq $managerServiceAccountNamespace "" }} + {{- fail "No service account namespace found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name to be explicit if you think the discovery is wrong." 
}} + {{- end }} +{{- $managerServiceAccountNamespace }} +{{- end }} +{{- end }} diff --git a/charts/gha-runner-scale-set/templates/manager_role.yaml b/charts/gha-runner-scale-set/templates/manager_role.yaml new file mode 100644 index 0000000000..6f4cd9a67e --- /dev/null +++ b/charts/gha-runner-scale-set/templates/manager_role.yaml @@ -0,0 +1,59 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "gha-runner-scale-set.managerRoleName" . }} + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - create + - delete + - get + - patch + - update +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + verbs: + - create + - delete + - get + - patch + - update +{{- if .Values.githubServerTLS }} +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/charts/gha-runner-scale-set/templates/manager_role_binding.yaml b/charts/gha-runner-scale-set/templates/manager_role_binding.yaml new file mode 100644 index 0000000000..ba38ab0f0b --- /dev/null +++ b/charts/gha-runner-scale-set/templates/manager_role_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "gha-runner-scale-set.managerRoleBinding" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "gha-runner-scale-set.managerRoleName" . }} +subjects: +- kind: ServiceAccount + name: {{ include "gha-runner-scale-set.managerServiceAccountName" . | nindent 4 }} + namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . 
| nindent 4 }} \ No newline at end of file diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index 864e63e8b4..24a0832651 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -27,8 +27,10 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -60,6 +62,8 @@ func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) { "githubConfigSecret.github_app_id": "10", "githubConfigSecret.github_app_installation_id": "100", "githubConfigSecret.github_app_private_key": "private_key", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -87,9 +91,11 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAuthInput(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_app_id": "", - "githubConfigSecret.github_token": "", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_app_id": "", + "githubConfigSecret.github_token": "", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -112,8 +118,10 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": 
"https://github.com/actions", - "githubConfigSecret.github_app_id": "10", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_app_id": "10", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -136,8 +144,10 @@ func TestTemplateNotRenderedGitHubSecretWithPredefinedSecret(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret": "pre-defined-secret", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secret", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -158,8 +168,10 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -190,9 +202,11 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "containerMode.type": "kubernetes", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "containerMode.type": "kubernetes", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -248,9 +262,11 @@ func 
TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "template.spec.serviceAccountName": "test-service-account", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "template.spec.serviceAccountName": "test-service-account", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -277,8 +293,10 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -322,9 +340,11 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "runnerScaleSetName": "test-runner-scale-set-name", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "runnerScaleSetName": "test-runner-scale-set-name", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -375,6 +395,8 @@ func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) { "template.metadata.labels.test2": "test2", "template.metadata.annotations.test3": "test3", "template.metadata.annotations.test4": 
"test4", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -414,9 +436,11 @@ func TestTemplateRenderedAutoScalingRunnerSet_MaxRunnersValidationError(t *testi options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "maxRunners": "-1", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "maxRunners": "-1", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -439,10 +463,12 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinRunnersValidationError(t *testi options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "maxRunners": "1", - "minRunners": "-1", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "maxRunners": "1", + "minRunners": "-1", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -465,10 +491,12 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationError(t *te options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "maxRunners": "0", - "minRunners": "1", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "maxRunners": "0", + "minRunners": "1", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -491,10 +519,12 
@@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationSameValue(t options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "maxRunners": "0", - "minRunners": "0", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "maxRunners": "0", + "minRunners": "0", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -520,9 +550,11 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMin(t options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "minRunners": "5", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "minRunners": "5", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -548,9 +580,11 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "maxRunners": "5", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "maxRunners": "5", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -605,6 +639,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + SetValues: map[string]string{ + "controllerServiceAccount.name": "arc", 
+ "controllerServiceAccount.namespace": "arc-system", + }, ValuesFiles: []string{testValuesPath}, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -635,6 +673,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + SetValues: map[string]string{ + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, ValuesFiles: []string{testValuesPath}, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -667,6 +709,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_K8S_ExtraVolumes(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + SetValues: map[string]string{ + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, ValuesFiles: []string{testValuesPath}, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -695,9 +741,11 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "containerMode.type": "dind", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "containerMode.type": "dind", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -784,9 +832,11 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret.github_token": "gh_token12345", - "containerMode.type": "kubernetes", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": 
"gh_token12345", + "containerMode.type": "kubernetes", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -839,8 +889,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T) options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret": "pre-defined-secrets", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secrets", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -871,8 +923,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_ErrorOnEmptyPredefinedSecret(t *te options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret": "", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -895,13 +949,15 @@ func TestTemplateRenderedWithProxy(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret": "pre-defined-secrets", - "proxy.http.url": "http://proxy.example.com", - "proxy.http.credentialSecretRef": "http-secret", - "proxy.https.url": "https://proxy.example.com", - "proxy.https.credentialSecretRef": "https-secret", - "proxy.noProxy": "{example.com,example.org}", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secrets", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + "proxy.http.url": "http://proxy.example.com", + "proxy.http.credentialSecretRef": "http-secret", + 
"proxy.https.url": "https://proxy.example.com", + "proxy.https.credentialSecretRef": "https-secret", + "proxy.noProxy": "{example.com,example.org}", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -961,6 +1017,8 @@ func TestTemplateRenderedWithTLS(t *testing.T) { "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", "githubServerTLS.runnerMountPath": "/runner/mount/path", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -1018,6 +1076,8 @@ func TestTemplateRenderedWithTLS(t *testing.T) { "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", "githubServerTLS.runnerMountPath": "/runner/mount/path/", "containerMode.type": "dind", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -1075,6 +1135,8 @@ func TestTemplateRenderedWithTLS(t *testing.T) { "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", "githubServerTLS.runnerMountPath": "/runner/mount/path", "containerMode.type": "kubernetes", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -1132,6 +1194,8 @@ func TestTemplateRenderedWithTLS(t *testing.T) { "githubConfigSecret": "pre-defined-secrets", "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -1184,7 +1248,9 @@ func TestTemplateRenderedWithTLS(t *testing.T) { "githubConfigSecret": "pre-defined-secrets", 
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", - "containerMode.type": "dind", + "containerMode.type": "dind", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -1237,7 +1303,9 @@ func TestTemplateRenderedWithTLS(t *testing.T) { "githubConfigSecret": "pre-defined-secrets", "githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap", "githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem", - "containerMode.type": "kubernetes", + "containerMode.type": "kubernetes", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -1293,8 +1361,10 @@ func TestTemplateNamingConstraints(t *testing.T) { require.NoError(t, err) setValues := map[string]string{ - "githubConfigUrl": "https://github.com/actions", - "githubConfigSecret": "", + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", } tt := map[string]struct { @@ -1339,8 +1409,10 @@ func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) { options := &helm.Options{ SetValues: map[string]string{ - "githubConfigUrl": "https://github.com/actions/", - "githubConfigSecret.github_token": "gh_token12345", + "githubConfigUrl": "https://github.com/actions/", + "githubConfigSecret.github_token": "gh_token12345", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -1354,3 +1426,97 @@ func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) { assert.Equal(t, "test-runners", ars.Name) assert.Equal(t, "https://github.com/actions", 
ars.Spec.GitHubConfigUrl) } + +func TestTemplate_CreateManagerRole(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"}) + + var managerRole rbacv1.Role + helm.UnmarshalK8SYaml(t, output, &managerRole) + + assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release") + assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name) + assert.Equal(t, 5, len(managerRole.Rules)) +} + +func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + "githubServerTLS.certificateFrom.configMapKeyRef.name": "test", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"}) + + var managerRole rbacv1.Role + 
helm.UnmarshalK8SYaml(t, output, &managerRole) + + assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release") + assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name) + assert.Equal(t, 6, len(managerRole.Rules)) + assert.Equal(t, "configmaps", managerRole.Rules[5].Resources[0]) +} + +func TestTemplate_CreateManagerRoleBinding(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"}) + + var managerRoleBinding rbacv1.RoleBinding + helm.UnmarshalK8SYaml(t, output, &managerRoleBinding) + + assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release") + assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name) + assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name) + assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace) +} diff --git a/charts/gha-runner-scale-set/tests/values.yaml b/charts/gha-runner-scale-set/tests/values.yaml index fc42555e78..425d25a40b 100644 --- a/charts/gha-runner-scale-set/tests/values.yaml +++ b/charts/gha-runner-scale-set/tests/values.yaml @@ -2,4 +2,7 @@ githubConfigUrl: 
https://github.com/actions/actions-runner-controller githubConfigSecret: github_token: test maxRunners: 10 -minRunners: 5 \ No newline at end of file +minRunners: 5 +controllerServiceAccount: + name: "arc" + namespace: "arc-system" \ No newline at end of file diff --git a/charts/gha-runner-scale-set/values.yaml b/charts/gha-runner-scale-set/values.yaml index 87677fbc5b..97bb1aa789 100644 --- a/charts/gha-runner-scale-set/values.yaml +++ b/charts/gha-runner-scale-set/values.yaml @@ -161,3 +161,13 @@ containerMode: resources: requests: storage: 1Gi + +## Optional controller service account that needs to have required Role and RoleBinding +## to operate this gha-runner-scale-set installation. +## The helm chart will try to find the controller deployment and its service account at installation time. +## In case the helm chart can't find the right service account, you can explicitly pass in the following value +## to help it finish RoleBinding with the right service account. +## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly. 
+# controllerServiceAccount: +# namespace: arc-system +# name: test-arc-gha-runner-scale-set-controller \ No newline at end of file From 4265c4a0ca26831edfe38cd0ff912eff621a1d7c Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Tue, 14 Mar 2023 15:38:04 +0100 Subject: [PATCH 141/561] Fix GITHUB_TOKEN permissions (#2410) --- .github/workflows/update-runners.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/update-runners.yaml b/.github/workflows/update-runners.yaml index 884ddd6a68..cf39ee1d99 100644 --- a/.github/workflows/update-runners.yaml +++ b/.github/workflows/update-runners.yaml @@ -77,6 +77,7 @@ jobs: permissions: pull-requests: write contents: write + actions: write env: GH_TOKEN: ${{ github.token }} CURRENT_VERSION: ${{ needs.check_versions.outputs.current_version }} From b21c301b982eeefc3b71ebf60c2e66521d5ecddd Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 14 Mar 2023 15:41:03 +0100 Subject: [PATCH 142/561] Update runner to version 2.303.0 (#2411) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- Makefile | 2 +- runner/Makefile | 2 +- runner/VERSION | 2 +- test/e2e/e2e_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 19cef42e9d..3d3a83716a 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ else endif DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1) VERSION ?= dev -RUNNER_VERSION ?= 2.302.1 +RUNNER_VERSION ?= 2.303.0 TARGETPLATFORM ?= $(shell arch) RUNNER_NAME ?= ${DOCKER_USER}/actions-runner RUNNER_TAG ?= ${VERSION} diff --git a/runner/Makefile b/runner/Makefile index e98a933aa6..d7dafebdae 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) -RUNNER_VERSION ?= 
2.302.1 +RUNNER_VERSION ?= 2.303.0 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0 DOCKER_VERSION ?= 20.10.23 diff --git a/runner/VERSION b/runner/VERSION index 22155b3fbc..f67bb997a5 100644 --- a/runner/VERSION +++ b/runner/VERSION @@ -1 +1 @@ -2.302.1 +2.303.0 diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index e4c3a233bc..66ac1c0393 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -41,7 +41,7 @@ var ( testResultCMNamePrefix = "test-result-" - RunnerVersion = "2.302.1" + RunnerVersion = "2.303.0" ) // If you're willing to run this test via VS Code "run test" or "debug test", From 26887bcdd2b8d049534898fd3f786ba716284cde Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 14 Mar 2023 10:52:25 -0400 Subject: [PATCH 143/561] Support the controller to watching a single namespace. (#2374) --- .github/workflows/e2e-test-linux-vm.yaml | 102 ++++++++ .../templates/_helpers.tpl | 8 + .../templates/deployment.yaml | 6 + .../templates/manager_cluster_role.yaml | 9 +- .../manager_cluster_role_binding.yaml | 4 +- ...ager_single_namespace_controller_role.yaml | 84 +++++++ ...gle_namespace_controller_role_binding.yaml | 15 ++ .../manager_single_namespace_watch_role.yaml | 117 +++++++++ ...r_single_namespace_watch_role_binding.yaml | 15 ++ .../tests/template_test.go | 225 ++++++++++++++++++ .../values.yaml | 4 + .../templates/_helpers.tpl | 88 +++++-- charts/gha-runner-scale-set/values.yaml | 2 +- main.go | 22 +- 14 files changed, 666 insertions(+), 35 deletions(-) create mode 100644 charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role.yaml create mode 100644 charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role_binding.yaml create mode 100644 charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml create mode 100644 charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role_binding.yaml diff --git 
a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 88ba0dd1ee..d0c71c210d 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -120,6 +120,108 @@ jobs: helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} + - name: Dump gha-runner-scale-set-controller logs + if: always() && steps.install_arc_controller.outcome == 'success' + run: | + kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems + + single-namespace-setup: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Resolve inputs + id: resolved_inputs + run: | + TARGET_ORG="${{env.TARGET_ORG}}" + TARGET_REPO="${{env.TARGET_REPO}}" + if [ ! -z "${{inputs.target_org}}" ]; then + TARGET_ORG="${{inputs.target_org}}" + fi + if [ ! -z "${{inputs.target_repo}}" ]; then + TARGET_REPO="${{inputs.target_repo}}" + fi + echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT + echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + + - uses: ./.github/actions/setup-arc-e2e + id: setup + with: + github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} + github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} + github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} + docker-image-name: ${{env.IMAGE_NAME}} + docker-image-tag: ${{env.IMAGE_VERSION}} + + - name: Install gha-runner-scale-set-controller + id: install_arc_controller + run: | + kubectl create namespace arc-runners + helm install arc \ + --namespace "arc-systems" \ + --create-namespace \ + --set image.repository=${{ env.IMAGE_NAME }} \ + --set image.tag=${{ env.IMAGE_VERSION }} \ + --set flags.watchSingleNamespace=arc-runners \ + ./charts/gha-runner-scale-set-controller \ + --debug + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l 
app.kubernetes.io/name=gha-runner-scale-set-controller -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller + kubectl get pod -n arc-systems + kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems + + - name: Install gha-runner-scale-set + id: install_arc + run: | + ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + helm install "$ARC_NAME" \ + --namespace "arc-runners" \ + --create-namespace \ + --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ + ./charts/gha-runner-scale-set \ + --debug + echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl get pod -n arc-systems + + - name: Test ARC scales pods up and down + run: | + export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" + export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" + go test ./test_e2e_arc -v + + - name: Uninstall gha-runner-scale-set + if: always() && steps.install_arc.outcome == 'success' + run: | + helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners + kubectl wait 
--timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} + - name: Dump gha-runner-scale-set-controller logs if: always() && steps.install_arc_controller.outcome == 'success' run: | diff --git a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl index ebef4a9b78..eb37c21fd6 100644 --- a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl @@ -80,6 +80,14 @@ Create the name of the service account to use {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-rolebinding {{- end }} +{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-role +{{- end }} + +{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-rolebinding +{{- end }} + {{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}} {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-role {{- end }} diff --git a/charts/gha-runner-scale-set-controller/templates/deployment.yaml b/charts/gha-runner-scale-set-controller/templates/deployment.yaml index f509aa67fe..a8f02e6233 100644 --- a/charts/gha-runner-scale-set-controller/templates/deployment.yaml +++ b/charts/gha-runner-scale-set-controller/templates/deployment.yaml @@ -7,6 +7,9 @@ metadata: {{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }} actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }} actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . 
}} + {{- if .Values.flags.watchSingleNamespace }} + actions.github.com/controller-watch-single-namespace: {{ .Values.flags.watchSingleNamespace }} + {{- end }} spec: replicas: {{ default 1 .Values.replicaCount }} selector: @@ -53,6 +56,9 @@ spec: {{- with .Values.flags.logLevel }} - "--log-level={{ . }}" {{- end }} + {{- with .Values.flags.watchSingleNamespace }} + - "--watch-single-namespace={{ . }}" + {{- end }} command: - "/manager" env: diff --git a/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml index deb3c999c6..3ea3127902 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml @@ -1,3 +1,4 @@ +{{- if empty .Values.flags.watchSingleNamespace }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -20,6 +21,7 @@ rules: resources: - autoscalingrunnersets/finalizers verbs: + - patch - update - apiGroups: - actions.github.com @@ -54,6 +56,7 @@ rules: resources: - autoscalinglisteners/finalizers verbs: + - patch - update - apiGroups: - actions.github.com @@ -92,13 +95,8 @@ rules: resources: - ephemeralrunners/finalizers verbs: - - create - - delete - - get - - list - patch - update - - watch - apiGroups: - actions.github.com resources: @@ -135,3 +133,4 @@ rules: verbs: - list - watch +{{- end }} diff --git a/charts/gha-runner-scale-set-controller/templates/manager_cluster_role_binding.yaml b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role_binding.yaml index 4ce8f9b868..041d73a935 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_cluster_role_binding.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role_binding.yaml @@ -1,3 +1,4 @@ +{{- if empty .Values.flags.watchSingleNamespace }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -9,4 
+10,5 @@ roleRef: subjects: - kind: ServiceAccount name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} \ No newline at end of file + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role.yaml new file mode 100644 index 0000000000..a72dc7387d --- /dev/null +++ b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role.yaml @@ -0,0 +1,84 @@ +{{- if .Values.flags.watchSingleNamespace }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }} + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners/finalizers + verbs: + - patch + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + verbs: + - list + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets + verbs: + - list + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets + verbs: + - list + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners + verbs: + - list + - watch +{{- end }} \ No newline at end of file diff --git 
a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role_binding.yaml b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role_binding.yaml new file mode 100644 index 0000000000..3423b9ddc9 --- /dev/null +++ b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role_binding.yaml @@ -0,0 +1,15 @@ +{{- if .Values.flags.watchSingleNamespace }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }} +subjects: +- kind: ServiceAccount + name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml new file mode 100644 index 0000000000..bf840bcf41 --- /dev/null +++ b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml @@ -0,0 +1,117 @@ +{{- if .Values.flags.watchSingleNamespace }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . 
}} + namespace: {{ .Values.flags.watchSingleNamespace }} +rules: +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets/finalizers + verbs: + - patch + - update +- apiGroups: + - actions.github.com + resources: + - autoscalingrunnersets/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners/finalizers + verbs: + - patch + - update +- apiGroups: + - actions.github.com + resources: + - ephemeralrunners/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.github.com + resources: + - autoscalinglisteners + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + verbs: + - list + - watch +{{- end }} \ No newline at end of file diff --git a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role_binding.yaml b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role_binding.yaml new file mode 100644 index 0000000000..3edd0c61ec --- /dev/null +++ b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role_binding.yaml @@ -0,0 +1,15 @@ 
+{{- if .Values.flags.watchSingleNamespace }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }} + namespace: {{ .Values.flags.watchSingleNamespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }} +subjects: +- kind: ServiceAccount + name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index b954867ee8..2a00370ae7 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -170,6 +170,12 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) { assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace") assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name) assert.Equal(t, 15, len(managerClusterRole.Rules)) + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"}) + assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role.yaml in chart", "We should get an error because the template should be skipped") + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role.yaml"}) + assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_watch_role.yaml in chart", "We should get an error because the template should be skipped") } func TestTemplate_ManagerClusterRoleBinding(t *testing.T) { @@ -199,6 +205,12 @@ func 
TestTemplate_ManagerClusterRoleBinding(t *testing.T) { assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRoleBinding.RoleRef.Name) assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerClusterRoleBinding.Subjects[0].Name) assert.Equal(t, namespaceName, managerClusterRoleBinding.Subjects[0].Namespace) + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"}) + assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role_binding.yaml in chart", "We should get an error because the template should be skipped") + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role_binding.yaml"}) + assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_watch_role_binding.yaml in chart", "We should get an error because the template should be skipped") } func TestTemplate_CreateManagerListenerRole(t *testing.T) { @@ -297,6 +309,7 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"]) assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"]) + assert.NotContains(t, deployment.Labels, "actions.github.com/controller-watch-single-namespace") assert.Equal(t, int32(1), *deployment.Spec.Replicas) @@ -595,3 +608,215 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) { assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) } + +func 
TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") + require.NoError(t, err) + + chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml")) + require.NoError(t, err) + + chart := new(Chart) + err = yaml.Unmarshal(chartContent, chart) + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "image.tag": "dev", + "flags.watchSingleNamespace": "demo", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"}) + + var deployment appsv1.Deployment + helm.UnmarshalK8SYaml(t, output, &deployment) + + assert.Equal(t, namespaceName, deployment.Namespace) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name) + assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"]) + assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) + assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) + assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) + assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"]) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"]) + assert.Equal(t, "demo", deployment.Labels["actions.github.com/controller-watch-single-namespace"]) + + assert.Equal(t, int32(1), *deployment.Spec.Replicas) + + assert.Equal(t, "gha-runner-scale-set-controller", 
deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) + assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"]) + + assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) + assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"]) + + assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"]) + + assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName) + assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext) + assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName) + assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds) + assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1) + assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name) + assert.NotNil(t, 10, deployment.Spec.Template.Spec.Volumes[0].EmptyDir) + + assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0) + assert.Nil(t, deployment.Spec.Template.Spec.Affinity) + assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0) + + managerImage := "ghcr.io/actions/gha-runner-scale-set-controller:dev" + + assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) + assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "ghcr.io/actions/gha-runner-scale-set-controller:dev", deployment.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) + assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3) + assert.Equal(t, "--auto-scaling-runner-set-only", 
deployment.Spec.Template.Spec.Containers[0].Args[0]) + assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1]) + assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2]) + + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2) + assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) + assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value) + + assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) + assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) + + assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources) + assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1) + assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name) + assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath) +} + +func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "flags.watchSingleNamespace": "demo", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role.yaml"}) + assert.ErrorContains(t, err, "could not find template templates/manager_cluster_role.yaml in chart", "We should get an error because the template should be skipped") +} + +func 
TestTemplate_WatchSingleNamespace_NotManagerClusterRoleBinding(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "serviceAccount.create": "true", + "flags.watchSingleNamespace": "demo", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_cluster_role_binding.yaml"}) + assert.ErrorContains(t, err, "could not find template templates/manager_cluster_role_binding.yaml in chart", "We should get an error because the template should be skipped") +} + +func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "flags.watchSingleNamespace": "demo", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"}) + + var managerSingleNamespaceControllerRole rbacv1.Role + helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRole) + + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRole.Name) + assert.Equal(t, namespaceName, managerSingleNamespaceControllerRole.Namespace) + assert.Equal(t, 10, len(managerSingleNamespaceControllerRole.Rules)) + + output = helm.RenderTemplate(t, options, helmChartPath, releaseName, 
[]string{"templates/manager_single_namespace_watch_role.yaml"}) + + var managerSingleNamespaceWatchRole rbacv1.Role + helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRole) + + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name) + assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace) + assert.Equal(t, 13, len(managerSingleNamespaceWatchRole.Rules)) +} + +func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "flags.watchSingleNamespace": "demo", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"}) + + var managerSingleNamespaceControllerRoleBinding rbacv1.RoleBinding + helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRoleBinding) + + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceControllerRoleBinding.Name) + assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Namespace) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRoleBinding.RoleRef.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceControllerRoleBinding.Subjects[0].Name) + assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Subjects[0].Namespace) + + output = helm.RenderTemplate(t, options, helmChartPath, releaseName, 
[]string{"templates/manager_single_namespace_watch_role_binding.yaml"}) + + var managerSingleNamespaceWatchRoleBinding rbacv1.RoleBinding + helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRoleBinding) + + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceWatchRoleBinding.Name) + assert.Equal(t, "demo", managerSingleNamespaceWatchRoleBinding.Namespace) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRoleBinding.RoleRef.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name) + assert.Equal(t, namespaceName, managerSingleNamespaceWatchRoleBinding.Subjects[0].Namespace) +} diff --git a/charts/gha-runner-scale-set-controller/values.yaml b/charts/gha-runner-scale-set-controller/values.yaml index 4e23294462..055d68e933 100644 --- a/charts/gha-runner-scale-set-controller/values.yaml +++ b/charts/gha-runner-scale-set-controller/values.yaml @@ -68,3 +68,7 @@ flags: # Log level can be set here with one of the following values: "debug", "info", "warn", "error". # Defaults to "debug". logLevel: "debug" + + # Restricts the controller to only watch resources in the desired namespace. + # Defaults to watch all namespaces when unset. 
+ # watchSingleNamespace: "" \ No newline at end of file diff --git a/charts/gha-runner-scale-set/templates/_helpers.tpl b/charts/gha-runner-scale-set/templates/_helpers.tpl index 208e42ea3d..013ca73e1c 100644 --- a/charts/gha-runner-scale-set/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set/templates/_helpers.tpl @@ -476,25 +476,46 @@ volumeMounts: {{- end }} {{- end }} {{- if eq $searchControllerDeployment 1 }} - {{- $counter := 0 }} + {{- $multiNamespacesCounter := 0 }} + {{- $singleNamespaceCounter := 0 }} {{- $controllerDeployment := dict }} + {{- $singleNamespaceControllerDeployments := dict }} {{- $managerServiceAccountName := "" }} {{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }} - {{- range $key, $val := $deployment.metadata.labels }} - {{- if and (eq $key "app.kubernetes.io/part-of") (eq $val "gha-runner-scale-set-controller") }} - {{- $counter = add $counter 1 }} - {{- $controllerDeployment = $deployment }} + {{- if kindIs "map" $deployment.metadata.labels }} + {{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }} + {{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }} + {{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }} + {{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}} + {{- else }} + {{- $multiNamespacesCounter = add $multiNamespacesCounter 1 }} + {{- $controllerDeployment = $deployment }} + {{- end }} {{- end }} {{- end }} {{- end }} - {{- if lt $counter 1 }} - {{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." 
}} + {{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }} + {{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} + {{- end }} + {{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }} + {{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} {{- end }} - {{- if gt $counter 1 }} - {{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} + {{- if gt $multiNamespacesCounter 1 }} + {{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." 
}} {{- end }} - {{- with $controllerDeployment.metadata }} - {{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }} + {{- if eq $multiNamespacesCounter 1 }} + {{- with $controllerDeployment.metadata }} + {{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }} + {{- end }} + {{- else if gt $singleNamespaceCounter 0 }} + {{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }} + {{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }} + {{- with $controllerDeployment.metadata }} + {{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }} + {{- end }} + {{- else }} + {{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} + {{- end }} {{- end }} {{- if eq $managerServiceAccountName "" }} {{- fail "No service account name found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." 
}} @@ -512,28 +533,49 @@ volumeMounts: {{- end }} {{- end }} {{- if eq $searchControllerDeployment 1 }} - {{- $counter := 0 }} + {{- $multiNamespacesCounter := 0 }} + {{- $singleNamespaceCounter := 0 }} {{- $controllerDeployment := dict }} + {{- $singleNamespaceControllerDeployments := dict }} {{- $managerServiceAccountNamespace := "" }} {{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }} - {{- range $key, $val := $deployment.metadata.labels }} - {{- if and (eq $key "app.kubernetes.io/part-of") (eq $val "gha-runner-scale-set-controller") }} - {{- $counter = add $counter 1 }} - {{- $controllerDeployment = $deployment }} + {{- if kindIs "map" $deployment.metadata.labels }} + {{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }} + {{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }} + {{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }} + {{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}} + {{- else }} + {{- $multiNamespacesCounter = add $multiNamespacesCounter 1 }} + {{- $controllerDeployment = $deployment }} + {{- end }} {{- end }} {{- end }} {{- end }} - {{- if lt $counter 1 }} - {{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller), consider setting controllerServiceAccount.name to be explicit if you think the discovery is wrong." }} + {{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }} + {{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." 
}} + {{- end }} + {{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }} + {{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} {{- end }} - {{- if gt $counter 1 }} - {{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller), consider setting controllerServiceAccount.name to be explicit if you think the discovery is wrong." }} + {{- if gt $multiNamespacesCounter 1 }} + {{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} {{- end }} - {{- with $controllerDeployment.metadata }} - {{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }} + {{- if eq $multiNamespacesCounter 1 }} + {{- with $controllerDeployment.metadata }} + {{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }} + {{- end }} + {{- else if gt $singleNamespaceCounter 0 }} + {{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }} + {{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }} + {{- with $controllerDeployment.metadata }} + {{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }} + {{- end }} + {{- else }} + {{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). 
Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} + {{- end }} {{- end }} {{- if eq $managerServiceAccountNamespace "" }} - {{- fail "No service account namespace found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name to be explicit if you think the discovery is wrong." }} + {{- fail "No service account namespace found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} {{- end }} {{- $managerServiceAccountNamespace }} {{- end }} diff --git a/charts/gha-runner-scale-set/values.yaml b/charts/gha-runner-scale-set/values.yaml index 97bb1aa789..7cb190f733 100644 --- a/charts/gha-runner-scale-set/values.yaml +++ b/charts/gha-runner-scale-set/values.yaml @@ -155,7 +155,7 @@ containerMode: ## the following is required when containerMode.type=kubernetes kubernetesModeWorkVolumeClaim: accessModes: ["ReadWriteOnce"] - # For testing, use https://github.com/rancher/local-path-provisioner to provide dynamic provision volume + # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to provide dynamic provision volume with storageClassName: openebs-hostpath # TODO: remove before release storageClassName: "dynamic-blob-storage" resources: diff --git a/main.go b/main.go index d91ada775e..ac9a79c542 100644 --- a/main.go +++ b/main.go @@ -37,6 +37,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" // +kubebuilder:scaffold:imports ) @@ -90,6 +91,7 @@ func main() { namespace string 
logLevel string logFormat string + watchSingleNamespace string autoScalerImagePullSecrets stringSlice @@ -126,6 +128,7 @@ func main() { flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.") flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited all the runners created by the controller. See https://github.com/actions/actions-runner-controller/issues/321 for more information") flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.") + flag.StringVar(&watchSingleNamespace, "watch-single-namespace", "", "Restrict to watch for custom resources in a single namespace.") flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`) flag.StringVar(&logFormat, "log-format", "text", `The log format. Valid options are "text" and "json". 
Defaults to "text"`) flag.BoolVar(&autoScalingRunnerSetOnly, "auto-scaling-runner-set-only", false, "Make controller only reconcile AutoRunnerScaleSet object.") @@ -149,13 +152,27 @@ func main() { ctrl.SetLogger(log) + managerNamespace := "" + var newCache cache.NewCacheFunc + if autoScalingRunnerSetOnly { // We don't support metrics for AutoRunnerScaleSet for now metricsAddr = "0" + + managerNamespace = os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE") + if managerNamespace == "" { + log.Error(err, "unable to obtain manager pod namespace") + os.Exit(1) + } + + if len(watchSingleNamespace) > 0 { + newCache = cache.MultiNamespacedCacheBuilder([]string{managerNamespace, watchSingleNamespace}) + } } mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, + NewCache: newCache, MetricsBindAddress: metricsAddr, LeaderElection: enableLeaderElection, LeaderElectionID: leaderElectionId, @@ -178,11 +195,6 @@ func main() { log.Error(err, "unable to obtain listener image") os.Exit(1) } - managerNamespace := os.Getenv("CONTROLLER_MANAGER_POD_NAMESPACE") - if managerNamespace == "" { - log.Error(err, "unable to obtain manager pod namespace") - os.Exit(1) - } actionsMultiClient := actions.NewMultiClient( "actions-runner-controller/"+build.Version, From 7b23b04e0223cf30088c78ecd88589888c11e03a Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Wed, 15 Mar 2023 11:10:09 -0400 Subject: [PATCH 144/561] Get RunnerScaleSet based on both RunnerGroupId and Name. 
(#2413) --- cmd/githubrunnerscalesetlistener/main_test.go | 2 +- .../autoscalingrunnerset_controller.go | 31 +++++++++++-------- github/actions/client.go | 6 ++-- .../actions/client_runner_scale_set_test.go | 14 ++++----- github/actions/fake/client.go | 2 +- github/actions/mock_ActionsService.go | 14 ++++----- 6 files changed, 37 insertions(+), 32 deletions(-) diff --git a/cmd/githubrunnerscalesetlistener/main_test.go b/cmd/githubrunnerscalesetlistener/main_test.go index e4c1df0320..6d11194f9d 100644 --- a/cmd/githubrunnerscalesetlistener/main_test.go +++ b/cmd/githubrunnerscalesetlistener/main_test.go @@ -144,7 +144,7 @@ func TestCustomerServerRootCA(t *testing.T) { client, err := newActionsClientFromConfig(config, creds) require.NoError(t, err) - _, err = client.GetRunnerScaleSet(ctx, "test") + _, err = client.GetRunnerScaleSet(ctx, 1, "test") require.NoError(t, err) assert.True(t, serverCalledSuccessfully) } diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index b279e084e2..b833e57d7c 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -316,24 +316,29 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set") return ctrl.Result{}, err } - runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, autoscalingRunnerSet.Spec.RunnerScaleSetName) + + runnerGroupId := 1 + if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 { + runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup) + if err != nil { + logger.Error(err, "Failed to get runner group by name", "runnerGroup", autoscalingRunnerSet.Spec.RunnerGroup) + return ctrl.Result{}, err + } + + runnerGroupId = int(runnerGroup.ID) + } + + runnerScaleSet, err 
:= actionsClient.GetRunnerScaleSet(ctx, runnerGroupId, autoscalingRunnerSet.Spec.RunnerScaleSetName) if err != nil { - logger.Error(err, "Failed to get runner scale set from Actions service") + logger.Error(err, "Failed to get runner scale set from Actions service", + "runnerGroupId", + strconv.Itoa(runnerGroupId), + "runnerScaleSetName", + autoscalingRunnerSet.Spec.RunnerScaleSetName) return ctrl.Result{}, err } - runnerGroupId := 1 if runnerScaleSet == nil { - if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 { - runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup) - if err != nil { - logger.Error(err, "Failed to get runner group by name", "runnerGroup", autoscalingRunnerSet.Spec.RunnerGroup) - return ctrl.Result{}, err - } - - runnerGroupId = int(runnerGroup.ID) - } - runnerScaleSet, err = actionsClient.CreateRunnerScaleSet( ctx, &actions.RunnerScaleSet{ diff --git a/github/actions/client.go b/github/actions/client.go index 7d68bd391a..51fe75d8e3 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -31,7 +31,7 @@ const ( //go:generate mockery --inpackage --name=ActionsService type ActionsService interface { - GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*RunnerScaleSet, error) + GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*RunnerScaleSet, error) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int) (*RunnerScaleSet, error) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (*RunnerGroup, error) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) @@ -285,8 +285,8 @@ func (c *Client) NewActionsServiceRequest(ctx context.Context, method, path stri return req, nil } -func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*RunnerScaleSet, error) { - path := fmt.Sprintf("/%s?name=%s", scaleSetEndpoint, runnerScaleSetName) +func (c *Client) 
GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*RunnerScaleSet, error) { + path := fmt.Sprintf("/%s?runnerGroupId=%d&name=%s", scaleSetEndpoint, runnerGroupId, runnerScaleSetName) req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil) if err != nil { return nil, err diff --git a/github/actions/client_runner_scale_set_test.go b/github/actions/client_runner_scale_set_test.go index d313d013cc..7f74190e90 100644 --- a/github/actions/client_runner_scale_set_test.go +++ b/github/actions/client_runner_scale_set_test.go @@ -34,7 +34,7 @@ func TestGetRunnerScaleSet(t *testing.T) { client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) - got, err := client.GetRunnerScaleSet(ctx, scaleSetName) + got, err := client.GetRunnerScaleSet(ctx, 1, scaleSetName) require.NoError(t, err) assert.Equal(t, want, got) }) @@ -50,7 +50,7 @@ func TestGetRunnerScaleSet(t *testing.T) { client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) - _, err = client.GetRunnerScaleSet(ctx, scaleSetName) + _, err = client.GetRunnerScaleSet(ctx, 1, scaleSetName) require.NoError(t, err) expectedPath := "/tenant/123/_apis/runtime/runnerscalesets" @@ -67,7 +67,7 @@ func TestGetRunnerScaleSet(t *testing.T) { client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) - _, err = client.GetRunnerScaleSet(ctx, scaleSetName) + _, err = client.GetRunnerScaleSet(ctx, 1, scaleSetName) assert.NotNil(t, err) }) @@ -80,7 +80,7 @@ func TestGetRunnerScaleSet(t *testing.T) { client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) - _, err = client.GetRunnerScaleSet(ctx, scaleSetName) + _, err = client.GetRunnerScaleSet(ctx, 1, scaleSetName) assert.NotNil(t, err) }) @@ -102,7 +102,7 @@ func TestGetRunnerScaleSet(t *testing.T) { ) require.NoError(t, err) - _, err = client.GetRunnerScaleSet(ctx, scaleSetName) + _, 
err = client.GetRunnerScaleSet(ctx, 1, scaleSetName) assert.NotNil(t, err) expectedRetry := retryMax + 1 assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) @@ -118,7 +118,7 @@ func TestGetRunnerScaleSet(t *testing.T) { client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) - got, err := client.GetRunnerScaleSet(ctx, scaleSetName) + got, err := client.GetRunnerScaleSet(ctx, 1, scaleSetName) require.NoError(t, err) assert.Equal(t, want, got) }) @@ -133,7 +133,7 @@ func TestGetRunnerScaleSet(t *testing.T) { client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) - _, err = client.GetRunnerScaleSet(ctx, scaleSetName) + _, err = client.GetRunnerScaleSet(ctx, 1, scaleSetName) require.NotNil(t, err) assert.Equal(t, wantErr.Error(), err.Error()) }) diff --git a/github/actions/fake/client.go b/github/actions/fake/client.go index 0729425d54..a4f17e4d3a 100644 --- a/github/actions/fake/client.go +++ b/github/actions/fake/client.go @@ -215,7 +215,7 @@ func (f *FakeClient) applyDefaults() { f.getRunnerByNameResult.RunnerReference = defaultRunnerReference } -func (f *FakeClient) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*actions.RunnerScaleSet, error) { +func (f *FakeClient) GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*actions.RunnerScaleSet, error) { return f.getRunnerScaleSetResult.RunnerScaleSet, f.getRunnerScaleSetResult.err } diff --git a/github/actions/mock_ActionsService.go b/github/actions/mock_ActionsService.go index ba10de8daa..ad5050bcc6 100644 --- a/github/actions/mock_ActionsService.go +++ b/github/actions/mock_ActionsService.go @@ -263,13 +263,13 @@ func (_m *MockActionsService) GetRunnerGroupByName(ctx context.Context, runnerGr return r0, r1 } -// GetRunnerScaleSet provides a mock function with given fields: ctx, runnerScaleSetName -func (_m 
*MockActionsService) GetRunnerScaleSet(ctx context.Context, runnerScaleSetName string) (*RunnerScaleSet, error) { - ret := _m.Called(ctx, runnerScaleSetName) +// GetRunnerScaleSet provides a mock function with given fields: ctx, runnerGroupId, runnerScaleSetName +func (_m *MockActionsService) GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*RunnerScaleSet, error) { + ret := _m.Called(ctx, runnerGroupId, runnerScaleSetName) var r0 *RunnerScaleSet - if rf, ok := ret.Get(0).(func(context.Context, string) *RunnerScaleSet); ok { - r0 = rf(ctx, runnerScaleSetName) + if rf, ok := ret.Get(0).(func(context.Context, int, string) *RunnerScaleSet); ok { + r0 = rf(ctx, runnerGroupId, runnerScaleSetName) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*RunnerScaleSet) @@ -277,8 +277,8 @@ func (_m *MockActionsService) GetRunnerScaleSet(ctx context.Context, runnerScale } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { - r1 = rf(ctx, runnerScaleSetName) + if rf, ok := ret.Get(1).(func(context.Context, int, string) error); ok { + r1 = rf(ctx, runnerGroupId, runnerScaleSetName) } else { r1 = ret.Error(1) } From 05165305ccb47609ca8866f8e05c41d400058361 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Wed, 15 Mar 2023 12:17:11 -0400 Subject: [PATCH 145/561] Adding e2e workflows to test dind, kube mode and proxy (#2412) --- .github/actions/setup-arc-e2e/action.yaml | 7 +- .github/workflows/e2e-test-linux-vm.yaml | 506 +++++++++++++++++++++- test_e2e_arc/arc_jobs_test.go | 12 +- 3 files changed, 518 insertions(+), 7 deletions(-) diff --git a/.github/actions/setup-arc-e2e/action.yaml b/.github/actions/setup-arc-e2e/action.yaml index 1f533b16ba..922bb9f8c9 100644 --- a/.github/actions/setup-arc-e2e/action.yaml +++ b/.github/actions/setup-arc-e2e/action.yaml @@ -49,12 +49,11 @@ runs: cache-from: type=gha cache-to: type=gha,mode=max - - name: Create Kind cluster and load image + - name: Create minikube cluster and 
load image shell: bash run: | - PATH=$(go env GOPATH)/bin:$PATH - kind create cluster --name arc-e2e - kind load docker-image ${{inputs.docker-image-name}}:${{inputs.docker-image-tag}} --name arc-e2e + minikube start + minikube image load ${{inputs.docker-image-name}}:${{inputs.docker-image-tag}} - name: Get configure token id: config-token diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index d0c71c210d..621d6962fe 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -27,6 +27,8 @@ env: jobs: default-setup: runs-on: ubuntu-latest + env: + WORKFLOW_FILE: "arc-test-workflow.yaml" steps: - uses: actions/checkout@v3 @@ -109,9 +111,11 @@ jobs: kubectl get pod -n arc-systems - name: Test ARC scales pods up and down + id: test run: | export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" + export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}" go test ./test_e2e_arc -v - name: Uninstall gha-runner-scale-set @@ -125,8 +129,19 @@ jobs: run: | kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems + - name: Job summary + if: always() && steps.install_arc.outcome == 'success' + run: | + cat <<-EOF > $GITHUB_STEP_SUMMARY + | **Outcome** | ${{ steps.test.outcome }} | + |----------------|--------------------------------------------- | + | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | + EOF + single-namespace-setup: runs-on: ubuntu-latest + env: + WORKFLOW_FILE: "arc-test-workflow.yaml" steps: - uses: actions/checkout@v3 @@ -211,9 +226,125 @@ jobs: kubectl get pod -n arc-systems - name: Test ARC scales pods up and down + id: test + run: | + export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" + export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" + 
export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}" + go test ./test_e2e_arc -v + + - name: Uninstall gha-runner-scale-set + if: always() && steps.install_arc.outcome == 'success' + run: | + helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners + kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} + + - name: Dump gha-runner-scale-set-controller logs + if: always() && steps.install_arc_controller.outcome == 'success' + run: | + kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems + + - name: Job summary + if: always() && steps.install_arc.outcome == 'success' + run: | + cat <<-EOF > $GITHUB_STEP_SUMMARY + | **Outcome** | ${{ steps.test.outcome }} | + |----------------|--------------------------------------------- | + | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | + EOF + + dind-mode-setup: + runs-on: ubuntu-latest + env: + WORKFLOW_FILE: arc-test-dind-workflow.yaml + steps: + - uses: actions/checkout@v3 + + - name: Resolve inputs + id: resolved_inputs + run: | + TARGET_ORG="${{env.TARGET_ORG}}" + TARGET_REPO="${{env.TARGET_REPO}}" + if [ ! -z "${{inputs.target_org}}" ]; then + TARGET_ORG="${{inputs.target_org}}" + fi + if [ ! 
-z "${{inputs.target_repo}}" ]; then + TARGET_REPO="${{inputs.target_repo}}" + fi + echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT + echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + + - uses: ./.github/actions/setup-arc-e2e + id: setup + with: + github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} + github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} + github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} + docker-image-name: ${{env.IMAGE_NAME}} + docker-image-tag: ${{env.IMAGE_VERSION}} + + - name: Install gha-runner-scale-set-controller + id: install_arc_controller + run: | + helm install arc \ + --namespace "arc-systems" \ + --create-namespace \ + --set image.repository=${{ env.IMAGE_NAME }} \ + --set image.tag=${{ env.IMAGE_VERSION }} \ + ./charts/gha-runner-scale-set-controller \ + --debug + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller + kubectl get pod -n arc-systems + kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems + + - name: Install gha-runner-scale-set + id: install_arc + run: | + ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + helm install "$ARC_NAME" \ + --namespace "arc-runners" \ + --create-namespace \ + --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ + --set containerMode.type="dind" \ + ./charts/gha-runner-scale-set \ + --debug + echo "ARC_NAME=$ARC_NAME" >> 
$GITHUB_OUTPUT + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl get pod -n arc-systems + + - name: Test ARC scales pods up and down + id: test run: | export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" + export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}" go test ./test_e2e_arc -v - name: Uninstall gha-runner-scale-set @@ -225,4 +356,377 @@ jobs: - name: Dump gha-runner-scale-set-controller logs if: always() && steps.install_arc_controller.outcome == 'success' run: | - kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems \ No newline at end of file + kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems + + - name: Job summary + if: always() && steps.install_arc.outcome == 'success' + run: | + cat <<-EOF > $GITHUB_STEP_SUMMARY + | **Outcome** | ${{ steps.test.outcome }} | + |----------------|--------------------------------------------- | + | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | + EOF + + kubernetes-mode-setup: + runs-on: ubuntu-latest + env: + WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml" + steps: + - uses: actions/checkout@v3 + + - name: Resolve inputs + id: resolved_inputs + run: | + TARGET_ORG="${{env.TARGET_ORG}}" + TARGET_REPO="${{env.TARGET_REPO}}" + if [ ! -z "${{inputs.target_org}}" ]; then + TARGET_ORG="${{inputs.target_org}}" + fi + if [ ! 
-z "${{inputs.target_repo}}" ]; then + TARGET_REPO="${{inputs.target_repo}}" + fi + echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT + echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + + - uses: ./.github/actions/setup-arc-e2e + id: setup + with: + github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} + github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} + github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} + docker-image-name: ${{env.IMAGE_NAME}} + docker-image-tag: ${{env.IMAGE_VERSION}} + + - name: Install gha-runner-scale-set-controller + id: install_arc_controller + run: | + helm install arc \ + --namespace "arc-systems" \ + --create-namespace \ + --set image.repository=${{ env.IMAGE_NAME }} \ + --set image.tag=${{ env.IMAGE_VERSION }} \ + ./charts/gha-runner-scale-set-controller \ + --debug + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller + kubectl get pod -n arc-systems + kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems + + - name: Install gha-runner-scale-set + id: install_arc + run: | + echo "Install openebs/dynamic-localpv-provisioner" + helm repo add openebs https://openebs.github.io/charts + helm repo update + helm install openebs openebs/openebs -n openebs --create-namespace + + ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + helm install "$ARC_NAME" \ + --namespace "arc-runners" \ + --create-namespace \ + --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG 
}}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ + --set containerMode.type="kubernetes" \ + --set containerMode.kubernetesModeWorkVolumeClaim.storageClassName="openebs-hostpath" \ + ./charts/gha-runner-scale-set \ + --debug + echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl get pod -n arc-systems + + - name: Test ARC scales pods up and down + id: test + run: | + export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" + export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" + export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}" + go test ./test_e2e_arc -v + + - name: Uninstall gha-runner-scale-set + if: always() && steps.install_arc.outcome == 'success' + run: | + helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners + kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} + + - name: Dump gha-runner-scale-set-controller logs + if: always() && steps.install_arc_controller.outcome == 'success' + run: | + kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems + + - name: Job summary + if: always() && steps.install_arc.outcome == 'success' + run: | + cat <<-EOF > $GITHUB_STEP_SUMMARY + | **Outcome** | ${{ steps.test.outcome }} | + |----------------|--------------------------------------------- | + | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG 
}}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | + EOF + + auth-proxy-setup: + runs-on: ubuntu-latest + env: + WORKFLOW_FILE: "arc-test-workflow.yaml" + steps: + - uses: actions/checkout@v3 + + - name: Resolve inputs + id: resolved_inputs + run: | + TARGET_ORG="${{env.TARGET_ORG}}" + TARGET_REPO="${{env.TARGET_REPO}}" + if [ ! -z "${{inputs.target_org}}" ]; then + TARGET_ORG="${{inputs.target_org}}" + fi + if [ ! -z "${{inputs.target_repo}}" ]; then + TARGET_REPO="${{inputs.target_repo}}" + fi + echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT + echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + + - uses: ./.github/actions/setup-arc-e2e + id: setup + with: + github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} + github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} + github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} + docker-image-name: ${{env.IMAGE_NAME}} + docker-image-tag: ${{env.IMAGE_VERSION}} + + - name: Install gha-runner-scale-set-controller + id: install_arc_controller + run: | + helm install arc \ + --namespace "arc-systems" \ + --create-namespace \ + --set image.repository=${{ env.IMAGE_NAME }} \ + --set image.tag=${{ env.IMAGE_VERSION }} \ + ./charts/gha-runner-scale-set-controller \ + --debug + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller + kubectl get pod -n arc-systems + kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems + + - name: Install gha-runner-scale-set + id: install_arc + run: | + docker run -d \ + --name squid 
\ + --publish 3128:3128 \ + huangtingluo/squid-proxy:latest + kubectl create namespace arc-runners + kubectl create secret generic proxy-auth \ + --namespace=arc-runners \ + --from-literal=username=github \ + --from-literal=password='actions' + ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + helm install "$ARC_NAME" \ + --namespace "arc-runners" \ + --create-namespace \ + --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ + --set proxy.https.url="http://host.minikube.internal:3128" \ + --set proxy.https.credentialSecretRef="proxy-auth" \ + --set "proxy.noProxy[0]=10.96.0.1:443" \ + ./charts/gha-runner-scale-set \ + --debug + echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl get pod -n arc-systems + + - name: Test ARC scales pods up and down + id: test + run: | + export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" + export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" + export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}" + go test ./test_e2e_arc -v + + - name: Uninstall gha-runner-scale-set + if: always() && steps.install_arc.outcome == 'success' + run: | + helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners + kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} + + - name: Dump 
gha-runner-scale-set-controller logs + if: always() && steps.install_arc_controller.outcome == 'success' + run: | + kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems + + - name: Job summary + if: always() && steps.install_arc.outcome == 'success' + run: | + cat <<-EOF > $GITHUB_STEP_SUMMARY + | **Outcome** | ${{ steps.test.outcome }} | + |----------------|--------------------------------------------- | + | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | + EOF + + anonymous-proxy-setup: + runs-on: ubuntu-latest + env: + WORKFLOW_FILE: "arc-test-workflow.yaml" + steps: + - uses: actions/checkout@v3 + + - name: Resolve inputs + id: resolved_inputs + run: | + TARGET_ORG="${{env.TARGET_ORG}}" + TARGET_REPO="${{env.TARGET_REPO}}" + if [ ! -z "${{inputs.target_org}}" ]; then + TARGET_ORG="${{inputs.target_org}}" + fi + if [ ! 
-z "${{inputs.target_repo}}" ]; then + TARGET_REPO="${{inputs.target_repo}}" + fi + echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT + echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + + - uses: ./.github/actions/setup-arc-e2e + id: setup + with: + github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} + github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} + github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} + docker-image-name: ${{env.IMAGE_NAME}} + docker-image-tag: ${{env.IMAGE_VERSION}} + + - name: Install gha-runner-scale-set-controller + id: install_arc_controller + run: | + helm install arc \ + --namespace "arc-systems" \ + --create-namespace \ + --set image.repository=${{ env.IMAGE_NAME }} \ + --set image.tag=${{ env.IMAGE_VERSION }} \ + ./charts/gha-runner-scale-set-controller \ + --debug + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller + kubectl get pod -n arc-systems + kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems + + - name: Install gha-runner-scale-set + id: install_arc + run: | + docker run -d \ + --name squid \ + --publish 3128:3128 \ + ubuntu/squid:latest + ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + helm install "$ARC_NAME" \ + --namespace "arc-runners" \ + --create-namespace \ + --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ + --set 
proxy.https.url="http://host.minikube.internal:3128" \ + --set "proxy.noProxy[0]=10.96.0.1:443" \ + ./charts/gha-runner-scale-set \ + --debug + echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl get pod -n arc-systems + + - name: Test ARC scales pods up and down + id: test + run: | + export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" + export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" + export WORKFLOW_FILE="${{ env.WORKFLOW_FILE }}" + go test ./test_e2e_arc -v + + - name: Uninstall gha-runner-scale-set + if: always() && steps.install_arc.outcome == 'success' + run: | + helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners + kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} + + - name: Dump gha-runner-scale-set-controller logs + if: always() && steps.install_arc_controller.outcome == 'success' + run: | + kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems + + - name: Job summary + if: always() && steps.install_arc.outcome == 'success' + run: | + cat <<-EOF > $GITHUB_STEP_SUMMARY + | **Outcome** | ${{ steps.test.outcome }} | + |----------------|--------------------------------------------- | + | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | + EOF diff --git a/test_e2e_arc/arc_jobs_test.go 
b/test_e2e_arc/arc_jobs_test.go index 8b3ca9f89b..39682c878d 100644 --- a/test_e2e_arc/arc_jobs_test.go +++ b/test_e2e_arc/arc_jobs_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" @@ -93,16 +94,23 @@ func TestARCJobs(t *testing.T) { t.Run("Get available pods during job run", func(t *testing.T) { c := http.Client{} targetArcName := os.Getenv("ARC_NAME") + require.NotEmpty(t, targetArcName, "ARC_NAME environment variable is required for this test to run. (e.g. arc-e2e-test)") + + targetWorkflow := os.Getenv("WORKFLOW_FILE") + require.NotEmpty(t, targetWorkflow, "WORKFLOW_FILE environment variable is required for this test to run. (e.g. e2e_test.yml)") + + ght := os.Getenv("GITHUB_TOKEN") + require.NotEmpty(t, ght, "GITHUB_TOKEN environment variable is required for this test to run.") + // We are triggering manually a workflow that already exists in the repo. // This workflow is expected to spin up a number of runner pods matching the runners value set in podCountsByType. - url := "https://api.github.com/repos/actions-runner-controller/arc_e2e_test_dummy/actions/workflows/arc-test-workflow.yaml/dispatches" + url := "https://api.github.com/repos/actions-runner-controller/arc_e2e_test_dummy/actions/workflows/" + targetWorkflow + "/dispatches" jsonStr := []byte(fmt.Sprintf(`{"ref":"main", "inputs":{"arc_name":"%s"}}`, targetArcName)) req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr)) if err != nil { t.Fatal(err) } - ght := os.Getenv("GITHUB_TOKEN") req.Header.Add("Accept", "application/vnd.github+json") req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ght)) req.Header.Add("X-GitHub-Api-Version", "2022-11-28") From af3982e4bbcd0e78bb3ab6ccac9cdbf98c8eb582 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Thu, 16 Mar 2023 09:21:43 -0400 Subject: [PATCH 146/561] Fix helm chart rendering errors. 
(#2414) --- .github/workflows/golangci-lint.yaml | 2 +- .../templates/_helpers.tpl | 16 +- .../templates/leader_election_role.yaml | 2 +- .../leader_election_role_binding.yaml | 2 +- .../templates/serviceaccount.yaml | 2 +- .../templates/_helpers.tpl | 123 +++++-------- .../templates/autoscalingrunnerset.yaml | 12 +- .../tests/template_test.go | 168 ++++++++++++++++++ .../tests/values_dind_merge_spec.yaml | 31 ++++ .../tests/values_extra_containers.yaml | 46 +++++ .../tests/values_extra_pod_spec.yaml | 12 ++ .../tests/values_k8s_merge_spec.yaml | 31 ++++ 12 files changed, 350 insertions(+), 97 deletions(-) create mode 100644 charts/gha-runner-scale-set/tests/values_dind_merge_spec.yaml create mode 100644 charts/gha-runner-scale-set/tests/values_extra_containers.yaml create mode 100644 charts/gha-runner-scale-set/tests/values_extra_pod_spec.yaml create mode 100644 charts/gha-runner-scale-set/tests/values_k8s_merge_spec.yaml diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 26d1bdb1a0..13e5fb55d1 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -20,4 +20,4 @@ jobs: uses: golangci/golangci-lint-action@v3 with: only-new-issues: true - version: v1.49.0 + version: v1.51.1 diff --git a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl index eb37c21fd6..a6b3135ec1 100644 --- a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl @@ -59,16 +59,16 @@ Create the name of the service account to use */}} {{- define "gha-runner-scale-set-controller.serviceAccountName" -}} {{- if eq .Values.serviceAccount.name "default"}} -{{- fail "serviceAccount.name cannot be set to 'default'" }} + {{- fail "serviceAccount.name cannot be set to 'default'" }} {{- end }} {{- if .Values.serviceAccount.create }} -{{- default (include 
"gha-runner-scale-set-controller.fullname" .) .Values.serviceAccount.name }} + {{- default (include "gha-runner-scale-set-controller.fullname" .) .Values.serviceAccount.name }} {{- else }} - {{- if not .Values.serviceAccount.name }} -{{- fail "serviceAccount.name must be set if serviceAccount.create is false" }} - {{- else }} -{{- .Values.serviceAccount.name }} - {{- end }} + {{- if not .Values.serviceAccount.name }} + {{- fail "serviceAccount.name must be set if serviceAccount.create is false" }} + {{- else }} + {{- .Values.serviceAccount.name }} + {{- end }} {{- end }} {{- end }} @@ -107,7 +107,7 @@ Create the name of the service account to use {{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}} {{- $names := list }} {{- range $k, $v := . }} -{{- $names = append $names $v.name }} + {{- $names = append $names $v.name }} {{- end }} {{- $names | join ","}} {{- end }} \ No newline at end of file diff --git a/charts/gha-runner-scale-set-controller/templates/leader_election_role.yaml b/charts/gha-runner-scale-set-controller/templates/leader_election_role.yaml index a64906d369..e23e0226c0 100644 --- a/charts/gha-runner-scale-set-controller/templates/leader_election_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/leader_election_role.yaml @@ -1,4 +1,4 @@ -{{- if gt (int (default 1 .Values.replicaCount)) 1 -}} +{{- if gt (int (default 1 .Values.replicaCount)) 1 }} # permissions to do leader election. 
apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/charts/gha-runner-scale-set-controller/templates/leader_election_role_binding.yaml b/charts/gha-runner-scale-set-controller/templates/leader_election_role_binding.yaml index b33dda68da..85effd27f2 100644 --- a/charts/gha-runner-scale-set-controller/templates/leader_election_role_binding.yaml +++ b/charts/gha-runner-scale-set-controller/templates/leader_election_role_binding.yaml @@ -1,4 +1,4 @@ -{{- if gt (int (default 1 .Values.replicaCount)) 1 -}} +{{- if gt (int (default 1 .Values.replicaCount)) 1 }} apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: diff --git a/charts/gha-runner-scale-set-controller/templates/serviceaccount.yaml b/charts/gha-runner-scale-set-controller/templates/serviceaccount.yaml index 090d3a44f5..5d3bbf3f69 100644 --- a/charts/gha-runner-scale-set-controller/templates/serviceaccount.yaml +++ b/charts/gha-runner-scale-set-controller/templates/serviceaccount.yaml @@ -1,4 +1,4 @@ -{{- if .Values.serviceAccount.create -}} +{{- if .Values.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: diff --git a/charts/gha-runner-scale-set/templates/_helpers.tpl b/charts/gha-runner-scale-set/templates/_helpers.tpl index 013ca73e1c..229e5d2abe 100644 --- a/charts/gha-runner-scale-set/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set/templates/_helpers.tpl @@ -75,19 +75,15 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{- define "gha-runner-scale-set.dind-init-container" -}} -{{- range $i, $val := .Values.template.spec.containers -}} -{{- if eq $val.name "runner" -}} +{{- range $i, $val := .Values.template.spec.containers }} + {{- if eq $val.name "runner" }} image: {{ $val.image }} -{{- if $val.imagePullSecrets }} -imagePullSecrets: - {{ $val.imagePullSecrets | toYaml -}} -{{- end }} command: ["cp"] args: ["-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"] volumeMounts: - name: dind-externals mountPath: 
/home/runner/tmpDir -{{- end }} + {{- end }} {{- end }} {{- end }} @@ -124,7 +120,7 @@ volumeMounts: {{- $createWorkVolume := 1 }} {{- range $i, $volume := .Values.template.spec.volumes }} {{- if eq $volume.name "work" }} - {{- $createWorkVolume = 0 -}} + {{- $createWorkVolume = 0 }} - {{ $volume | toYaml | nindent 2 }} {{- end }} {{- end }} @@ -138,7 +134,7 @@ volumeMounts: {{- $createWorkVolume := 1 }} {{- range $i, $volume := .Values.template.spec.volumes }} {{- if eq $volume.name "work" }} - {{- $createWorkVolume = 0 -}} + {{- $createWorkVolume = 0 }} - {{ $volume | toYaml | nindent 2 }} {{- end }} {{- end }} @@ -160,25 +156,20 @@ volumeMounts: {{- end }} {{- define "gha-runner-scale-set.non-runner-containers" -}} - {{- range $i, $container := .Values.template.spec.containers -}} - {{- if ne $container.name "runner" -}} -- name: {{ $container.name }} - {{- range $key, $val := $container }} - {{- if ne $key "name" }} - {{ $key }}: {{ $val }} - {{- end }} - {{- end }} + {{- range $i, $container := .Values.template.spec.containers }} + {{- if ne $container.name "runner" }} +- {{ $container | toYaml | nindent 2 }} {{- end }} {{- end }} {{- end }} {{- define "gha-runner-scale-set.dind-runner-container" -}} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} -{{- range $i, $container := .Values.template.spec.containers -}} - {{- if eq $container.name "runner" -}} +{{- range $i, $container := .Values.template.spec.containers }} + {{- if eq $container.name "runner" }} {{- range $key, $val := $container }} {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }} -{{ $key }}: {{ $val }} +{{ $key }}: {{ $val | toYaml | nindent 2 }} {{- end }} {{- end }} {{- $setDockerHost := 1 }} @@ -195,29 +186,24 @@ env: {{- with $container.env }} {{- range $i, $env := . 
}} {{- if eq $env.name "DOCKER_HOST" }} - {{- $setDockerHost = 0 -}} + {{- $setDockerHost = 0 }} {{- end }} {{- if eq $env.name "DOCKER_TLS_VERIFY" }} - {{- $setDockerTlsVerify = 0 -}} + {{- $setDockerTlsVerify = 0 }} {{- end }} {{- if eq $env.name "DOCKER_CERT_PATH" }} - {{- $setDockerCertPath = 0 -}} + {{- $setDockerCertPath = 0 }} {{- end }} {{- if eq $env.name "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS" }} - {{- $setRunnerWaitDocker = 0 -}} + {{- $setRunnerWaitDocker = 0 }} {{- end }} {{- if eq $env.name "NODE_EXTRA_CA_CERTS" }} - {{- $setNodeExtraCaCerts = 0 -}} + {{- $setNodeExtraCaCerts = 0 }} {{- end }} {{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }} - {{- $setRunnerUpdateCaCerts = 0 -}} - {{- end }} - - name: {{ $env.name }} - {{- range $envKey, $envVal := $env }} - {{- if ne $envKey "name" }} - {{ $envKey }}: {{ $envVal | toYaml | nindent 8 }} - {{- end }} + {{- $setRunnerUpdateCaCerts = 0 }} {{- end }} + - {{ $env | toYaml | nindent 4 }} {{- end }} {{- end }} {{- if $setDockerHost }} @@ -254,20 +240,15 @@ volumeMounts: {{- with $container.volumeMounts }} {{- range $i, $volMount := . 
}} {{- if eq $volMount.name "work" }} - {{- $mountWork = 0 -}} + {{- $mountWork = 0 }} {{- end }} {{- if eq $volMount.name "dind-cert" }} - {{- $mountDindCert = 0 -}} + {{- $mountDindCert = 0 }} {{- end }} {{- if eq $volMount.name "github-server-tls-cert" }} - {{- $mountGitHubServerTLS = 0 -}} - {{- end }} - - name: {{ $volMount.name }} - {{- range $mountKey, $mountVal := $volMount }} - {{- if ne $mountKey "name" }} - {{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }} - {{- end }} + {{- $mountGitHubServerTLS = 0 }} {{- end }} + - {{ $volMount | toYaml | nindent 4 }} {{- end }} {{- end }} {{- if $mountWork }} @@ -290,11 +271,11 @@ volumeMounts: {{- define "gha-runner-scale-set.kubernetes-mode-runner-container" -}} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} -{{- range $i, $container := .Values.template.spec.containers -}} - {{- if eq $container.name "runner" -}} +{{- range $i, $container := .Values.template.spec.containers }} + {{- if eq $container.name "runner" }} {{- range $key, $val := $container }} {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }} -{{ $key }}: {{ $val }} +{{ $key }}: {{ $val | toYaml | nindent 2 }} {{- end }} {{- end }} {{- $setContainerHooks := 1 }} @@ -310,26 +291,21 @@ env: {{- with $container.env }} {{- range $i, $env := . 
}} {{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }} - {{- $setContainerHooks = 0 -}} + {{- $setContainerHooks = 0 }} {{- end }} {{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }} - {{- $setPodName = 0 -}} + {{- $setPodName = 0 }} {{- end }} {{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }} - {{- $setRequireJobContainer = 0 -}} + {{- $setRequireJobContainer = 0 }} {{- end }} {{- if eq $env.name "NODE_EXTRA_CA_CERTS" }} - {{- $setNodeExtraCaCerts = 0 -}} + {{- $setNodeExtraCaCerts = 0 }} {{- end }} {{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }} - {{- $setRunnerUpdateCaCerts = 0 -}} - {{- end }} - - name: {{ $env.name }} - {{- range $envKey, $envVal := $env }} - {{- if ne $envKey "name" }} - {{ $envKey }}: {{ $envVal | toYaml | nindent 8 }} - {{- end }} + {{- $setRunnerUpdateCaCerts = 0 }} {{- end }} + - {{ $env | toYaml | nindent 4 }} {{- end }} {{- end }} {{- if $setContainerHooks }} @@ -363,17 +339,12 @@ volumeMounts: {{- with $container.volumeMounts }} {{- range $i, $volMount := . 
}} {{- if eq $volMount.name "work" }} - {{- $mountWork = 0 -}} + {{- $mountWork = 0 }} {{- end }} {{- if eq $volMount.name "github-server-tls-cert" }} - {{- $mountGitHubServerTLS = 0 -}} - {{- end }} - - name: {{ $volMount.name }} - {{- range $mountKey, $mountVal := $volMount }} - {{- if ne $mountKey "name" }} - {{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }} - {{- end }} + {{- $mountGitHubServerTLS = 0 }} {{- end }} + - {{ $volMount | toYaml | nindent 4 }} {{- end }} {{- end }} {{- if $mountWork }} @@ -391,14 +362,14 @@ volumeMounts: {{- define "gha-runner-scale-set.default-mode-runner-containers" -}} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} -{{- range $i, $container := .Values.template.spec.containers -}} -{{- if ne $container.name "runner" -}} +{{- range $i, $container := .Values.template.spec.containers }} +{{- if ne $container.name "runner" }} - {{ $container | toYaml | nindent 2 }} {{- else }} - name: {{ $container.name }} {{- range $key, $val := $container }} {{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }} - {{ $key }}: {{ $val }} + {{ $key }}: {{ $val | toYaml | nindent 4 }} {{- end }} {{- end }} {{- $setNodeExtraCaCerts := 0 }} @@ -411,17 +382,12 @@ volumeMounts: {{- with $container.env }} {{- range $i, $env := . }} {{- if eq $env.name "NODE_EXTRA_CA_CERTS" }} - {{- $setNodeExtraCaCerts = 0 -}} + {{- $setNodeExtraCaCerts = 0 }} {{- end }} {{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }} - {{- $setRunnerUpdateCaCerts = 0 -}} - {{- end }} - - name: {{ $env.name }} - {{- range $envKey, $envVal := $env }} - {{- if ne $envKey "name" }} - {{ $envKey }}: {{ $envVal | toYaml | nindent 10 }} - {{- end }} + {{- $setRunnerUpdateCaCerts = 0 }} {{- end }} + - {{ $env | toYaml | nindent 6 }} {{- end }} {{- end }} {{- if $setNodeExtraCaCerts }} @@ -440,14 +406,9 @@ volumeMounts: {{- with $container.volumeMounts }} {{- range $i, $volMount := . 
}} {{- if eq $volMount.name "github-server-tls-cert" }} - {{- $mountGitHubServerTLS = 0 -}} - {{- end }} - - name: {{ $volMount.name }} - {{- range $mountKey, $mountVal := $volMount }} - {{- if ne $mountKey "name" }} - {{ $mountKey }}: {{ $mountVal | toYaml | nindent 10 }} - {{- end }} + {{- $mountGitHubServerTLS = 0 }} {{- end }} + - {{ $volMount | toYaml | nindent 6 }} {{- end }} {{- end }} {{- if $mountGitHubServerTLS }} diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index e974be48cf..e272291b23 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -36,17 +36,21 @@ spec: {{- if .Values.proxy.http }} http: url: {{ .Values.proxy.http.url }} + {{- if .Values.proxy.http.credentialSecretRef }} credentialSecretRef: {{ .Values.proxy.http.credentialSecretRef }} - {{ end }} + {{- end }} + {{- end }} {{- if .Values.proxy.https }} https: url: {{ .Values.proxy.https.url }} + {{- if .Values.proxy.https.credentialSecretRef }} credentialSecretRef: {{ .Values.proxy.https.credentialSecretRef }} - {{ end }} + {{- end }} + {{- end }} {{- if and .Values.proxy.noProxy (kindIs "slice" .Values.proxy.noProxy) }} noProxy: {{ .Values.proxy.noProxy | toYaml | nindent 6}} - {{ end }} - {{ end }} + {{- end }} + {{- end }} {{- if and (or (kindIs "int64" .Values.minRunners) (kindIs "float64" .Values.minRunners)) (or (kindIs "int64" .Values.maxRunners) (kindIs "float64" .Values.maxRunners)) }} {{- if gt .Values.minRunners .Values.maxRunners }} diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index 24a0832651..c5960ad34c 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -1520,3 +1520,171 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) { assert.Equal(t, 
"arc", managerRoleBinding.Subjects[0].Name) assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace) } + +func TestTemplateRenderedAutoScalingRunnerSet_ExtraContainers(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + testValuesPath, err := filepath.Abs("../tests/values_extra_containers.yaml") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, + ValuesFiles: []string{testValuesPath}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug") + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers") + assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner") + assert.Equal(t, "other", ars.Spec.Template.Spec.Containers[1].Name, "Container name should be other") + assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set") + assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set") + assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[1].Resources.Limits.Cpu().String(), "CPU Limit should be set") + assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[1].Resources.Limits.Memory().String(), "Memory Limit should be set") + assert.Equal(t, "SOME_ENV", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "SOME_ENV should be set") + assert.Equal(t, "SOME_VALUE", 
ars.Spec.Template.Spec.Containers[0].Env[0].Value, "SOME_ENV should be set to `SOME_VALUE`") + assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set") + assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`") + assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work") + assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work") + assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others") + assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others") + assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be work") + assert.Equal(t, corev1.DNSNone, ars.Spec.Template.Spec.DNSPolicy, "DNS Policy should be None") + assert.Equal(t, "192.0.2.1", ars.Spec.Template.Spec.DNSConfig.Nameservers[0], "DNS Nameserver should be set") +} + +func TestTemplateRenderedAutoScalingRunnerSet_ExtraPodSpec(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + testValuesPath, err := filepath.Abs("../tests/values_extra_pod_spec.yaml") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, + ValuesFiles: []string{testValuesPath}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, 
[]string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 containers") + assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner") + assert.Equal(t, corev1.DNSNone, ars.Spec.Template.Spec.DNSPolicy, "DNS Policy should be None") + assert.Equal(t, "192.0.2.1", ars.Spec.Template.Spec.DNSConfig.Nameservers[0], "DNS Nameserver should be set") +} + +func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + testValuesPath, err := filepath.Abs("../tests/values_dind_merge_spec.yaml") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, + ValuesFiles: []string{testValuesPath}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug") + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers") + assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner") + assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set") + assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set") + assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name, 
"DOCKER_HOST should be set") + assert.Equal(t, "tcp://localhost:9999", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "DOCKER_HOST should be set to `tcp://localhost:9999`") + assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set") + assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`") + assert.Equal(t, "DOCKER_TLS_VERIFY", ars.Spec.Template.Spec.Containers[0].Env[2].Name, "DOCKER_TLS_VERIFY should be set") + assert.Equal(t, "1", ars.Spec.Template.Spec.Containers[0].Env[2].Value, "DOCKER_TLS_VERIFY should be set to `1`") + assert.Equal(t, "DOCKER_CERT_PATH", ars.Spec.Template.Spec.Containers[0].Env[3].Name, "DOCKER_CERT_PATH should be set") + assert.Equal(t, "/certs/client", ars.Spec.Template.Spec.Containers[0].Env[3].Value, "DOCKER_CERT_PATH should be set to `/certs/client`") + assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work") + assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work") + assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others") + assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others") +} + +func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + testValuesPath, err := filepath.Abs("../tests/values_k8s_merge_spec.yaml") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + 
"controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, + ValuesFiles: []string{testValuesPath}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}, "--debug") + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 containers") + assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner") + assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set") + assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set") + assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name, "ACTIONS_RUNNER_CONTAINER_HOOKS should be set") + assert.Equal(t, "/k8s/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value, "ACTIONS_RUNNER_CONTAINER_HOOKS should be set to `/k8s/index.js`") + assert.Equal(t, "MY_NODE_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name, "MY_NODE_NAME should be set") + assert.Equal(t, "spec.nodeName", ars.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath, "MY_NODE_NAME should be set to `spec.nodeName`") + assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[2].Name, "ACTIONS_RUNNER_POD_NAME should be set") + assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[3].Name, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER should be set") + assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name, "VolumeMount name should be work") + assert.Equal(t, "/work", ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath, "VolumeMount mountPath should be /work") + 
assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others") + assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others") +} diff --git a/charts/gha-runner-scale-set/tests/values_dind_merge_spec.yaml b/charts/gha-runner-scale-set/tests/values_dind_merge_spec.yaml new file mode 100644 index 0000000000..21b9a56b22 --- /dev/null +++ b/charts/gha-runner-scale-set/tests/values_dind_merge_spec.yaml @@ -0,0 +1,31 @@ +githubConfigUrl: https://github.com/actions/actions-runner-controller +githubConfigSecret: + github_token: test +template: + spec: + containers: + - name: runner + image: runner-image:latest + env: + - name: DOCKER_HOST + value: tcp://localhost:9999 + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: work + mountPath: /work + - name: others + mountPath: /others + resources: + limits: + memory: "64Mi" + cpu: "250m" + volumes: + - name: work + hostPath: + path: /data + type: Directory +containerMode: + type: dind \ No newline at end of file diff --git a/charts/gha-runner-scale-set/tests/values_extra_containers.yaml b/charts/gha-runner-scale-set/tests/values_extra_containers.yaml new file mode 100644 index 0000000000..61c72fe6cc --- /dev/null +++ b/charts/gha-runner-scale-set/tests/values_extra_containers.yaml @@ -0,0 +1,46 @@ +githubConfigUrl: https://github.com/actions/actions-runner-controller +githubConfigSecret: + github_token: test +template: + spec: + containers: + - name: runner + image: runner-image:latest + env: + - name: SOME_ENV + value: SOME_VALUE + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: work + mountPath: /work + - name: others + mountPath: /others + resources: + limits: + memory: "64Mi" + cpu: "250m" + - name: other + image: other-image:latest + volumeMounts: + - name: work + mountPath: /work + - name: 
others + mountPath: /others + resources: + limits: + memory: "64Mi" + cpu: "250m" + volumes: + - name: work + hostPath: + path: /data + type: Directory + dnsPolicy: "None" + dnsConfig: + nameservers: + - 192.0.2.1 +containerMode: + type: none \ No newline at end of file diff --git a/charts/gha-runner-scale-set/tests/values_extra_pod_spec.yaml b/charts/gha-runner-scale-set/tests/values_extra_pod_spec.yaml new file mode 100644 index 0000000000..39ac799c7e --- /dev/null +++ b/charts/gha-runner-scale-set/tests/values_extra_pod_spec.yaml @@ -0,0 +1,12 @@ +githubConfigUrl: https://github.com/actions/actions-runner-controller +githubConfigSecret: + github_token: test +template: + spec: + containers: + - name: runner + image: runner-image:latest + dnsPolicy: "None" + dnsConfig: + nameservers: + - 192.0.2.1 \ No newline at end of file diff --git a/charts/gha-runner-scale-set/tests/values_k8s_merge_spec.yaml b/charts/gha-runner-scale-set/tests/values_k8s_merge_spec.yaml new file mode 100644 index 0000000000..c62cf0e5e8 --- /dev/null +++ b/charts/gha-runner-scale-set/tests/values_k8s_merge_spec.yaml @@ -0,0 +1,31 @@ +githubConfigUrl: https://github.com/actions/actions-runner-controller +githubConfigSecret: + github_token: test +template: + spec: + containers: + - name: runner + image: runner-image:latest + env: + - name: ACTIONS_RUNNER_CONTAINER_HOOKS + value: /k8s/index.js + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: work + mountPath: /work + - name: others + mountPath: /others + resources: + limits: + memory: "64Mi" + cpu: "250m" + volumes: + - name: work + hostPath: + path: /data + type: Directory +containerMode: + type: kubernetes \ No newline at end of file From 0b9c063b301562a247bfc29a525c5ed107367666 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Thu, 16 Mar 2023 16:02:18 +0100 Subject: [PATCH 147/561] Update the values.yaml sample for improved clarity (#2416) --- 
charts/gha-runner-scale-set/values.yaml | 35 ++++++++++++------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/charts/gha-runner-scale-set/values.yaml b/charts/gha-runner-scale-set/values.yaml index 7cb190f733..c9a6d815a7 100644 --- a/charts/gha-runner-scale-set/values.yaml +++ b/charts/gha-runner-scale-set/values.yaml @@ -68,16 +68,20 @@ githubConfigSecret: # key: ca.pem # runnerMountPath: /usr/local/share/ca-certificates/ -## template is the PodSpec for each runner Pod -template: - spec: - containers: - - name: runner - image: ghcr.io/actions/actions-runner:latest - command: ["/home/runner/run.sh"] - containerMode: type: "" ## type can be set to dind or kubernetes + ## the following is required when containerMode.type=kubernetes + # kubernetesModeWorkVolumeClaim: + # accessModes: ["ReadWriteOnce"] + # # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to provide dynamic provision volume with storageClassName: openebs-hostpath + # storageClassName: "dynamic-blob-storage" + # resources: + # requests: + # storage: 1Gi + +## template is the PodSpec for each runner Pod +template: + ## template.spec will be modified if you change the container mode ## with containerMode.type=dind, we will populate the template.spec with following pod spec ## template: ## spec: @@ -151,16 +155,11 @@ containerMode: ## resources: ## requests: ## storage: 1Gi - - ## the following is required when containerMode.type=kubernetes - kubernetesModeWorkVolumeClaim: - accessModes: ["ReadWriteOnce"] - # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to provide dynamic provision volume with storageClassName: openebs-hostpath - # TODO: remove before release - storageClassName: "dynamic-blob-storage" - resources: - requests: - storage: 1Gi + spec: + containers: + - name: runner + image: ghcr.io/actions/actions-runner:latest + command: 
["/home/runner/run.sh"] ## Optional controller service account that needs to have required Role and RoleBinding ## to operate this gha-runner-scale-set installation. From 33a9408f8736c0cf88f89b37ac16441fc6b0587b Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Thu, 16 Mar 2023 16:02:42 +0100 Subject: [PATCH 148/561] Introduce ADR change for adding labels to our resources (#2407) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- adrs/2022-10-17-runner-image.md | 54 ++++++----- adrs/2022-10-27-runnerscaleset-lifetime.md | 24 ++--- adrs/2022-11-04-crd-api-group-name.md | 15 ++-- .../2022-12-05-adding-labels-k8s-resources.md | 15 +++- ...-27-pick-the-right-runner-to-scale-down.md | 52 ++++++----- adrs/2023-02-02-automate-runner-updates.md | 5 +- ...023-02-10-limit-manager-role-permission.md | 19 ++-- .../2023-04-14-adding-labels-k8s-resources.md | 89 +++++++++++++++++++ adrs/yyyy-mm-dd-TEMPLATE.md | 8 +- .../templates/_helpers.tpl | 4 +- .../templates/deployment.yaml | 2 +- .../tests/template_test.go | 2 + 12 files changed, 207 insertions(+), 82 deletions(-) create mode 100644 adrs/2023-04-14-adding-labels-k8s-resources.md diff --git a/adrs/2022-10-17-runner-image.md b/adrs/2022-10-17-runner-image.md index f9d2a88e64..c219bacf1d 100644 --- a/adrs/2022-10-17-runner-image.md +++ b/adrs/2022-10-17-runner-image.md @@ -1,4 +1,5 @@ -# ADR 0001: Produce the runner image for the scaleset client +# ADR 2022-10-17: Produce the runner image for the scaleset client + **Date**: 2022-10-17 **Status**: Done @@ -7,6 +8,7 @@ We aim to provide an similar experience (as close as possible) between self-hosted and GitHub-hosted runners. To achieve this, we are making the following changes to align our self-hosted runner container image with the Ubuntu runners managed by GitHub. Here are the changes: + - We created a USER `runner(1001)` and a GROUP `docker(123)` - `sudo` has been on the image and the `runner` will be a passwordless sudoer. 
- The runner binary was placed placed under `/home/runner/` and launched using `/home/runner/run.sh` @@ -18,31 +20,33 @@ The latest Dockerfile can be found at: https://github.com/actions/runner/blob/ma # Context -user can bring their own runner images, the contract we have are: -- It must have a runner binary under /actions-runner (/actions-runner/run.sh exists) -- The WORKDIR is set to /actions-runner -- If the user inside the container is root, the ENV RUNNER_ALLOW_RUNASROOT should be set to 1 +users can bring their own runner images, the contract we require is: + +- It must have a runner binary under `/actions-runner` i.e. `/actions-runner/run.sh` exists +- The `WORKDIR` is set to `/actions-runner` +- If the user inside the container is root, the environment variable `RUNNER_ALLOW_RUNASROOT` should be set to `1` -The existing ARC runner images will not work with the new ARC mode out-of-box for the following reason: +The existing [ARC runner images](https://github.com/orgs/actions-runner-controller/packages?tab=packages&q=actions-runner) will not work with the new ARC mode out-of-box for the following reason: -- The current runner image requires caller to pass runner configure info, ex: URL and Config Token -- The current runner image has the runner binary under /runner +- The current runner image requires the caller to pass runner configuration info, ex: URL and Config Token +- The current runner image has the runner binary under `/runner` which violates the contract described above - The current runner image requires a special entrypoint script in order to work around some volume mount limitation for setting up DinD. -However, since we expose the raw runner Pod spec to our user, advanced user can modify the helm values.yaml to make everything lines up properly. +Since we expose the raw runner PodSpec to our end users, they can modify the helm `values.yaml` to adjust the runner container to their needs. 
# Guiding Principles - Build image is separated in two stages. ## The first stage (build) + - Reuses the same base image, so it is faster to build. -- Installs utilities needed to download assets (runner and runner-container-hooks). +- Installs utilities needed to download assets (`runner` and `runner-container-hooks`). - Downloads the runner and stores it into `/actions-runner` directory. - Downloads the runner-container-hooks and stores it into `/actions-runner/k8s` directory. - You can use build arguments to control the runner version, the target platform and runner container hooks version. -Preview: +Preview (the published runner image might vary): ```Dockerfile FROM mcr.microsoft.com/dotnet/runtime-deps:6.0 as build @@ -64,6 +68,7 @@ RUN curl -f -L -o runner-container-hooks.zip https://github.com/actions/runner-c ``` ## The main image: + - Copies assets from the build stage to `/actions-runner` - Does not provide an entrypoint. The entrypoint should be set within the container definition. @@ -77,6 +82,7 @@ COPY --from=build /actions-runner . 
``` ## Example of pod spec with the init container copying assets + ```yaml apiVersion: v1 kind: Pod @@ -84,20 +90,20 @@ metadata: name: spec: containers: - - name: runner - image: - command: ["/runner/run.sh"] - volumeMounts: - name: runner - mountPath: /runner + image: + command: ["/runner/run.sh"] + volumeMounts: + - name: runner + mountPath: /runner initContainers: - - name: setup - image: - command: ["sh", "-c", "cp -r /actions-runner/* /runner/"] - volumeMounts: - - name: runner - mountPath: /runner + - name: setup + image: + command: ["sh", "-c", "cp -r /actions-runner/* /runner/"] + volumeMounts: + - name: runner + mountPath: /runner volumes: - - name: runner - emptyDir: {} + - name: runner + emptyDir: {} ``` diff --git a/adrs/2022-10-27-runnerscaleset-lifetime.md b/adrs/2022-10-27-runnerscaleset-lifetime.md index a66d44a63a..c9c95fd3b1 100644 --- a/adrs/2022-10-27-runnerscaleset-lifetime.md +++ b/adrs/2022-10-27-runnerscaleset-lifetime.md @@ -1,4 +1,4 @@ -# ADR 0003: Lifetime of RunnerScaleSet on Service +# ADR 2022-10-27: Lifetime of RunnerScaleSet on Service **Date**: 2022-10-27 @@ -12,8 +12,9 @@ The `RunnerScaleSet` object will represent a set of homogeneous self-hosted runn A `RunnerScaleSet` client (ARC) needs to communicate with the Actions service via HTTP long-poll in a certain protocol to get a workflow job successfully landed on one of its homogeneous self-hosted runners. -In this ADR, I want to discuss the following within the context of actions-runner-controller's new scaling mode: -- Who and how to create a RunnerScaleSet on the service? +In this ADR, we discuss the following within the context of actions-runner-controller's new scaling mode: + +- Who and how to create a RunnerScaleSet on the service? - Who and how to delete a RunnerScaleSet on the service? - What will happen to all the runners and jobs when the deletion happens? 
@@ -30,18 +31,19 @@ In this ADR, I want to discuss the following within the context of actions-runne - When the user patch existing `AutoScalingRunnerSet`'s RunnerScaleSet related properly, ex: `runnerGroupName`, `runnerWorkDir`, the controller needs to make an HTTP PATCH call to the `_apis/runtime/runnerscalesets/2` endpoint in order to update the object on the service. - We will put the deployed `AutoScalingRunnerSet` resource in an error state when the user tries to patch the resource with a different `githubConfigUrl` -> Basically, you can't move a deployed `AutoScalingRunnerSet` across GitHub entity, repoA->repoB, repoA->OrgC, etc. -> We evaluated blocking the change before instead of erroring at runtime and that we decided not to go down this route because it forces us to re-introduce admission webhooks (require cert-manager). + > Basically, you can't move a deployed `AutoScalingRunnerSet` across GitHub entity, repoA->repoB, repoA->OrgC, etc. + > We evaluated blocking the change before instead of erroring at runtime and that we decided not to go down this route because it forces us to re-introduce admission webhooks (require cert-manager). ## RunnerScaleSet deletion - `AutoScalingRunnerSet` custom resource controller will delete the `RunnerScaleSet` object in the Actions service on any `AutoScalingRunnerSet` resource deletion. -> `AutoScalingRunnerSet` deletion will contain several steps: -> - Stop the listener app so no more new jobs coming and no more scaling up/down. -> - Request scale down to 0 -> - Force stop all runners -> - Wait for the scale down to 0 -> - Delete the `RunnerScaleSet` object from service via REST API + > `AutoScalingRunnerSet` deletion will contain several steps: + > + > - Stop the listener app so no more new jobs coming and no more scaling up/down. 
+ > - Request scale down to 0 + > - Force stop all runners + > - Wait for the scale down to 0 + > - Delete the `RunnerScaleSet` object from service via REST API - The deletion is via REST API on Actions service `DELETE _apis/runtime/runnerscalesets/1` - The deletion needs to use the runner registration token (admin). diff --git a/adrs/2022-11-04-crd-api-group-name.md b/adrs/2022-11-04-crd-api-group-name.md index e3aabcdd79..654c6a5fcf 100644 --- a/adrs/2022-11-04-crd-api-group-name.md +++ b/adrs/2022-11-04-crd-api-group-name.md @@ -1,4 +1,5 @@ -# ADR 0004: Technical detail about actions-runner-controller repository transfer +# ADR 2022-11-04: Technical detail about actions-runner-controller repository transfer + **Date**: 2022-11-04 **Status**: Done @@ -8,17 +9,18 @@ As part of ARC Private Beta: Repository Migration & Open Sourcing Process, we have decided to transfer the current [actions-runner-controller repository](https://github.com/actions-runner-controller/actions-runner-controller) into the [Actions org](https://github.com/actions). **Goals:** + - A clear signal that GitHub will start taking over ARC and provide support. - Since we are going to deprecate the existing auto-scale mode in ARC at some point, we want to have a clear separation between the legacy mode (not supported) and the new mode (supported). -- Avoid disrupting users as much as we can, existing ARC users will not notice any difference after the repository transfer, they can keep upgrading to the newer version of ARC and keep using the legacy mode. +- Avoid disrupting users as much as we can, existing ARC users will not notice any difference after the repository transfer, they can keep upgrading to the newer version of ARC and keep using the legacy mode. 
**Challenges** + - The original creator's name (`summerwind`) is all over the place, including some critical parts of ARC: - - The k8s user resource API's full name is `actions.summerwind.dev/v1alpha1/RunnerDeployment`, renaming it to `actions.github.com` is a breaking change and will force the user to rebuild their entire k8s cluster. - - All docker images around ARC (controller + default runner) is published to [dockerhub/summerwind](https://hub.docker.com/u/summerwind) + - The k8s user resource API's full name is `actions.summerwind.dev/v1alpha1/RunnerDeployment`, renaming it to `actions.github.com` is a breaking change and will force the user to rebuild their entire k8s cluster. + - All docker images around ARC (controller + default runner) is published to [dockerhub/summerwind](https://hub.docker.com/u/summerwind) - The helm chart for ARC is currently hosted on [GitHub pages](https://actions-runner-controller.github.io/actions-runner-controller) for https://github.com/actions-runner-controller/actions-runner-controller, moving the repository means we will break users who install ARC via the helm chart - # Decisions ## APIs group names for k8s custom resources, `actions.summerwind` or `actions.github` @@ -27,8 +29,9 @@ As part of ARC Private Beta: Repository Migration & Open Sourcing Process, we ha - For any new resource API we are going to add, those will be named properly under GitHub, ex: `actions.github.com/v1alpha1/AutoScalingRunnerSet` Benefits: + - A clear separation from existing ARC: - - Easy for the support engineer to triage income tickets and figure out whether we need to support the use case from the user + - Easy for the support engineer to triage income tickets and figure out whether we need to support the use case from the user - We won't break existing users when they upgrade to a newer version of ARC after the repository transfer Based on the spike done by `@nikola-jokic`, we have confidence that we can host multiple resources with different 
API names under the same repository, and the published ARC controller can handle both resources properly. diff --git a/adrs/2022-12-05-adding-labels-k8s-resources.md b/adrs/2022-12-05-adding-labels-k8s-resources.md index 1ce4246ef3..7ce328a264 100644 --- a/adrs/2022-12-05-adding-labels-k8s-resources.md +++ b/adrs/2022-12-05-adding-labels-k8s-resources.md @@ -1,8 +1,8 @@ -# ADR 0007: Adding labels to our resources +# ADR 2022-12-05: Adding labels to our resources **Date**: 2022-12-05 -**Status**: Done +**Status**: Deprecated [^1] ## Context @@ -20,12 +20,15 @@ Assuming standard logging that would allow us to get all ARC logs by running ```bash kubectl logs -l 'app.kubernetes.io/part-of=actions-runner-controller' ``` + which would be very useful for development to begin with. The proposal is to add these sets of labels to the pods ARC creates: #### controller-manager + Labels to be set by the Helm chart: + ```yaml metadata: labels: @@ -35,7 +38,9 @@ metadata: ``` #### Listener + Labels to be set by controller at creation: + ```yaml metadata: labels: @@ -43,7 +48,7 @@ metadata: app.kubernetes.io/component: runner-scale-set-listener app.kubernetes.io/version: "x.x.x" actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet - + # the following labels are to be extracted by the config URL actions.github.com/enterprise: enterprise actions.github.com/organization: organization @@ -51,7 +56,9 @@ metadata: ``` #### Runner + Labels to be set by controller at creation: + ```yaml metadata: labels: @@ -78,3 +85,5 @@ Or for example if they're having problems specifically with runners: This way users don't have to understand ARC moving parts but we still have a way to target them specifically if we need to. 
+ +[^1]: Superseded by [ADR 2023-04-14](2023-04-14-adding-labels-k8s-resources.md) diff --git a/adrs/2022-12-27-pick-the-right-runner-to-scale-down.md b/adrs/2022-12-27-pick-the-right-runner-to-scale-down.md index 217925233c..3cf7dbd063 100644 --- a/adrs/2022-12-27-pick-the-right-runner-to-scale-down.md +++ b/adrs/2022-12-27-pick-the-right-runner-to-scale-down.md @@ -1,4 +1,5 @@ -# ADR 0008: Pick the right runner to scale down +# ADR 2022-12-27: Pick the right runner to scale down + **Date**: 2022-12-27 **Status**: Done @@ -7,35 +8,37 @@ - A custom resource `EphemeralRunnerSet` manage a set of custom resource `EphemeralRunners` - The `EphemeralRunnerSet` has `Replicas` in its `Spec`, and the responsibility of the `EphemeralRunnerSet_controller` is to reconcile a given `EphemeralRunnerSet` to have - the same amount of `EphemeralRunners` as the `Spec.Replicas` defined. - - This means the `EphemeralRunnerSet_controller` will scale up the `EphemeralRunnerSet` by creating more `EphemeralRunner` in the case of the `Spec.Replicas` is higher than - the current amount of `EphemeralRunners`. - - This also means the `EphemeralRunnerSet_controller` will scale down the `EphemeralRunnerSet` by finding some existing `EphemeralRunner` to delete in the case of + the same amount of `EphemeralRunners` as the `Spec.Replicas` defined. +- This means the `EphemeralRunnerSet_controller` will scale up the `EphemeralRunnerSet` by creating more `EphemeralRunner` in the case of the `Spec.Replicas` is higher than + the current amount of `EphemeralRunners`. +- This also means the `EphemeralRunnerSet_controller` will scale down the `EphemeralRunnerSet` by finding some existing `EphemeralRunner` to delete in the case of the `Spec.Replicas` is less than the current amount of `EphemeralRunners`. - - This ADR is about how can we find the right existing `EphemeralRunner` to delete when we need to scale down. 
- - - ## Current approach - + +This ADR is about how can we find the right existing `EphemeralRunner` to delete when we need to scale down. + +## Current approach + 1. `EphemeralRunnerSet_controller` figure out how many `EphemeralRunner` it needs to delete, ex: need to scale down from 10 to 2 means we need to delete 8 `EphemeralRunner` 2. `EphemeralRunnerSet_controller` find all `EphemeralRunner` that is in the `Running` or `Pending` phase. - > `Pending` means the `EphemeralRunner` is still probably creating and a runner has not yet configured with the Actions service. - > `Running` means the `EphemeralRunner` is created and a runner has probably configured with Actions service, the runner may sit there idle, - > or maybe actively running a workflow job. We don't have a clear answer for it from the ARC side. (Actions service knows it for sure) + + > `Pending` means the `EphemeralRunner` is still probably creating and a runner has not yet configured with the Actions service. + > `Running` means the `EphemeralRunner` is created and a runner has probably configured with Actions service, the runner may sit there idle, + > or maybe actively running a workflow job. We don't have a clear answer for it from the ARC side. (Actions service knows it for sure) 3. `EphemeralRunnerSet_controller` make an HTTP DELETE request to the Actions service for each `EphemeralRunner` from the previous step and ask the Actions service to delete the runner via `RunnerId`. -(The `RunnerId` is generated after the runner registered with the Actions service, and stored on the `EphemeralRunner.Status.RunnerId`) - > - The HTTP DELETE request looks like the following: - > `DELETE https://pipelines.actions.githubusercontent.com/WoxlUxJHrKEzIp4Nz3YmrmLlZBonrmj9xCJ1lrzcJ9ZsD1Tnw7/_apis/distributedtask/pools/0/agents/1024` - > The Actions service will return 2 types of responses: - > 1. 
204 (No Content): The runner with Id 1024 has been successfully removed from the service or the runner with Id 1024 doesn't exist. - > 2. 400 (Bad Request) with JSON body that contains an error message like `JobStillRunningException`: The service can't remove this runner at this point since it has been - > assigned to a job request, the client won't be able to remove the runner until the runner finishes its current assigned job request. + (The `RunnerId` is generated after the runner registered with the Actions service, and stored on the `EphemeralRunner.Status.RunnerId`) -4. `EphemeralRunnerSet_controller` will ignore any deletion error from runners that are still running a job, and keep trying deletion until the amount of `204` equals the amount of -`EphemeralRunner` needs to delete. + > - The HTTP DELETE request looks like the following: + > `DELETE https://pipelines.actions.githubusercontent.com/WoxlUxJHrKEzIp4Nz3YmrmLlZBonrmj9xCJ1lrzcJ9ZsD1Tnw7/_apis/distributedtask/pools/0/agents/1024` + > The Actions service will return 2 types of responses: + > + > 1. 204 (No Content): The runner with Id 1024 has been successfully removed from the service or the runner with Id 1024 doesn't exist. + > 2. 400 (Bad Request) with JSON body that contains an error message like `JobStillRunningException`: The service can't remove this runner at this point since it has been + > assigned to a job request, the client won't be able to remove the runner until the runner finishes its current assigned job request. + +4. `EphemeralRunnerSet_controller` will ignore any deletion error from runners that are still running a job, and keep trying deletion until the amount of `204` equals the amount of + `EphemeralRunner` needs to delete. ## The problem with the current approach @@ -68,6 +71,7 @@ this would be a big `NO` from a security point of view since we may not trust th The nature of the k8s controller-runtime means we might reconcile the resource base on stale cache data. 
I think our goal for the solution should be: + - Reduce wasteful HTTP requests on a scale-down as much as we can. - We can accept that we might make 1 or 2 wasteful requests to Actions service, but we can't accept making 5/10+ of them. - See if we can meet feature parity with what the RunnerJobHook support with compromise any security concerns. @@ -77,9 +81,11 @@ a simple thought is how about we somehow attach some info to the `EphemeralRunne How about we send this info from the service to the auto-scaling-listener via the existing HTTP long-poll and let the listener patch the `EphemeralRunner.Status` to indicate it's running a job? + > The listener is normally in a separate namespace with elevated permission and it's something we can trust. Changes: + - Introduce a new message type `JobStarted` (in addition to the existing `JobAvailable/JobAssigned/JobCompleted`) on the service side, the message is sent when a runner of the `RunnerScaleSet` get assigned to a job, `RequestId`, `RunnerId`, and `RunnerName` will be included in the message. - Add `RequestId (int)` to `EphemeralRunner.Status`, this will indicate which job the runner is running. diff --git a/adrs/2023-02-02-automate-runner-updates.md b/adrs/2023-02-02-automate-runner-updates.md index f88f0a325e..393e78996c 100644 --- a/adrs/2023-02-02-automate-runner-updates.md +++ b/adrs/2023-02-02-automate-runner-updates.md @@ -1,4 +1,6 @@ -# Automate updating runner version +# ADR 2023-02-02: Automate updating runner version + +**Date**: 2023-02-02 **Status**: Proposed @@ -16,6 +18,7 @@ version is updated (and this is currently done manually). 
We can have another workflow running on a cadence (hourly seems sensible) and checking for new runner releases, creating a PR updating `RUNNER_VERSION` in: + - `.github/workflows/release-runners.yaml` - `Makefile` - `runner/Makefile` diff --git a/adrs/2023-02-10-limit-manager-role-permission.md b/adrs/2023-02-10-limit-manager-role-permission.md index 9838b4a4cd..803a4ec967 100644 --- a/adrs/2023-02-10-limit-manager-role-permission.md +++ b/adrs/2023-02-10-limit-manager-role-permission.md @@ -1,4 +1,5 @@ -# ADR 0007: Limit Permissions for Service Accounts in Actions-Runner-Controller +# ADR 2023-02-10: Limit Permissions for Service Accounts in Actions-Runner-Controller + **Date**: 2023-02-10 **Status**: Pending @@ -7,7 +8,7 @@ - `actions-runner-controller` is a Kubernetes CRD (with controller) built using https://github.com/kubernetes-sigs/controller-runtime -- [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) has a default cache based k8s API client.Reader to make query k8s API server more efficiency. +- [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) has a default cache based k8s API client.Reader to make query k8s API server more efficiency. - The cache-based API client requires cluster scope `list` and `watch` permission for any resource the controller may query. @@ -22,6 +23,7 @@ There are 3 service accounts involved for a working `AutoscalingRunnerSet` based This should have the lowest privilege (not any `RoleBinding` nor `ClusterRoleBinding`) by default, in the case of `containerMode=kubernetes`, it will get certain write permission with `RoleBinding` to limit the permission to a single namespace. 
> References: +> > - ./charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml > - ./charts/gha-runner-scale-set/templates/kube_mode_role.yaml > - ./charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml @@ -52,7 +54,7 @@ The current `ClusterRole` has the following permissions: ## Limit cluster role permission on Secrets -The cluster scope `List` `Secrets` permission might be a blocker for adopting `actions-runner-controller` for certain customers as they may have certain restriction in their cluster that simply doesn't allow any service account to have cluster scope `List Secrets` permission. +The cluster scope `List` `Secrets` permission might be a blocker for adopting `actions-runner-controller` for certain customers as they may have certain restriction in their cluster that simply doesn't allow any service account to have cluster scope `List Secrets` permission. To help these customers and improve security for `actions-runner-controller` in general, we will try to limit the `ClusterRole` permission of the controller manager's service account down to the following: @@ -79,9 +81,10 @@ The `Role` and `RoleBinding` creation will happen during the `helm install demo During `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`, we will store the controller's service account info as labels on the controller `Deployment`. Ex: + ```yaml - actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }} - actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }} +actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }} +actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . 
}} ``` Introduce a new `Role` per `AutoScalingRunnerSet` installation and `RoleBinding` the `Role` with the controller's `ServiceAccount` in the namespace that each `AutoScalingRunnerSet` deployed with the following permission. @@ -102,8 +105,9 @@ The `gha-runner-scale-set` helm chart will use this service account to properly The `gha-runner-scale-set` helm chart will also allow customers to explicitly provide the controller service account info, in case the `helm lookup` couldn't locate the right controller `Deployment`. New sections in `values.yaml` of `gha-runner-scale-set`: + ```yaml -## Optional controller service account that needs to have required Role and RoleBinding +## Optional controller service account that needs to have required Role and RoleBinding ## to operate this gha-runner-scale-set installation. ## The helm chart will try to find the controller deployment and its service account at installation time. ## In case the helm chart can't find the right service account, you can explicitly pass in the following value @@ -129,5 +133,6 @@ You will deploy the `AutoScalingRunnerSet` with something like `helm install dem In this mode, you will end up with a manager `Role` that has all Get/List/Create/Delete/Update/Patch/Watch permissions on resources we need, and a `RoleBinding` to bind the `Role` with the controller `ServiceAccount` in the watched single namespace and the controller namespace, ex: `test-namespace` and `arc-system` in the above example. The downside of this mode: + - When you have multiple controllers deployed, they will still use the same version of the CRD. So you will need to make sure every controller you deployed has to be the same version as each other. -- You can't mismatch install both `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster. 
\ No newline at end of file +- You can't mismatch install both `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster. diff --git a/adrs/2023-04-14-adding-labels-k8s-resources.md b/adrs/2023-04-14-adding-labels-k8s-resources.md new file mode 100644 index 0000000000..737e0653c8 --- /dev/null +++ b/adrs/2023-04-14-adding-labels-k8s-resources.md @@ -0,0 +1,89 @@ +# ADR 2023-04-14: Adding labels to our resources + +**Date**: 2023-04-14 + +**Status**: Done [^1] + +## Context + +Users need to provide us with logs so that we can help support and troubleshoot their issues. We need a way for our users to filter and retrieve the logs we need. + +## Proposal + +A good start would be a catch-all label to get all logs that are +ARC-related: one of the [recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/) +is `app.kubernetes.io/part-of` and we can set that for all ARC components +to be `actions-runner-controller`. + +Assuming standard logging that would allow us to get all ARC logs by running + +```bash +kubectl logs -l 'app.kubernetes.io/part-of=gha-runner-scale-set-controller' +``` + +which would be very useful for development to begin with. 
+ +The proposal is to add these sets of labels to the pods ARC creates: + +#### controller-manager + +Labels to be set by the Helm chart: + +```yaml +metadata: + labels: + app.kubernetes.io/part-of: gha-runner-scale-set-controller + app.kubernetes.io/component: controller-manager + app.kubernetes.io/version: "x.x.x" +``` + +#### Listener + +Labels to be set by controller at creation: + +```yaml +metadata: + labels: + app.kubernetes.io/part-of: gha-runner-scale-set-controller + app.kubernetes.io/component: runner-scale-set-listener + app.kubernetes.io/version: "x.x.x" + actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet + + # the following labels are to be extracted by the config URL + actions.github.com/enterprise: enterprise + actions.github.com/organization: organization + actions.github.com/repository: repository +``` + +#### Runner + +Labels to be set by controller at creation: + +```yaml +metadata: + labels: + app.kubernetes.io/part-of: gha-runner-scale-set-controller + app.kubernetes.io/component: runner + app.kubernetes.io/version: "x.x.x" + actions.github.com/scale-set-name: scale-set-name # this corresponds to metadata.name as set for AutoscalingRunnerSet + actions.github.com/runner-name: runner-name + actions.github.com/runner-group-name: runner-group-name + + # the following labels are to be extracted by the config URL + actions.github.com/enterprise: enterprise + actions.github.com/organization: organization + actions.github.com/repository: repository +``` + +This would allow us to ask users: + +> Can you please send us the logs coming from pods labelled 'app.kubernetes.io/part-of=gha-runner-scale-set-controller'? + +Or for example if they're having problems specifically with runners: + +> Can you please send us the logs coming from pods labelled 'app.kubernetes.io/component=runner'? 
+ +This way users don't have to understand ARC moving parts but we still have a +way to target them specifically if we need to. + +[^1]: [ADR 2022-12-05](2022-12-05-adding-labels-k8s-resources.md) diff --git a/adrs/yyyy-mm-dd-TEMPLATE.md b/adrs/yyyy-mm-dd-TEMPLATE.md index 8f15d0302c..3c6c187edb 100644 --- a/adrs/yyyy-mm-dd-TEMPLATE.md +++ b/adrs/yyyy-mm-dd-TEMPLATE.md @@ -6,13 +6,13 @@ ## Context -*What is the issue or background knowledge necessary for future readers -to understand why this ADR was written?* +_What is the issue or background knowledge necessary for future readers +to understand why this ADR was written?_ ## Decision -**What** is the change being proposed? / **How** will it be implemented?* +_**What** is the change being proposed? **How** will it be implemented?_ ## Consequences -*What becomes easier or more difficult to do because of this change?* +_What becomes easier or more difficult to do because of this change?_ diff --git a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl index a6b3135ec1..468ddf6d31 100644 --- a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl @@ -39,7 +39,7 @@ helm.sh/chart: {{ include "gha-runner-scale-set-controller.chart" . 
}} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} -app.kubernetes.io/part-of: {{ .Chart.Name }} +app.kubernetes.io/part-of: gha-runner-scale-set-controller app.kubernetes.io/managed-by: {{ .Release.Service }} {{- range $k, $v := .Values.labels }} {{ $k }}: {{ $v }} @@ -110,4 +110,4 @@ Create the name of the service account to use {{- $names = append $names $v.name }} {{- end }} {{- $names | join ","}} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/gha-runner-scale-set-controller/templates/deployment.yaml b/charts/gha-runner-scale-set-controller/templates/deployment.yaml index a8f02e6233..b239040a8f 100644 --- a/charts/gha-runner-scale-set-controller/templates/deployment.yaml +++ b/charts/gha-runner-scale-set-controller/templates/deployment.yaml @@ -23,7 +23,7 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} labels: - app.kubernetes.io/part-of: actions-runner-controller + app.kubernetes.io/part-of: gha-runner-scale-set-controller app.kubernetes.io/component: controller-manager app.kubernetes.io/version: {{ .Chart.Version }} {{- include "gha-runner-scale-set-controller.selectorLabels" . 
| nindent 8 }} diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index 2a00370ae7..96b671eb0f 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -310,6 +310,7 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"]) assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"]) assert.NotContains(t, deployment.Labels, "actions.github.com/controller-watch-single-namespace") + assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"]) assert.Equal(t, int32(1), *deployment.Spec.Replicas) @@ -416,6 +417,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) + assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"]) assert.Equal(t, "bar", deployment.Labels["foo"]) assert.Equal(t, "actions", deployment.Labels["github"]) From 398b4524d59fdb3da6b673ccbe79482457448387 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Fri, 17 Mar 2023 04:26:51 -0400 Subject: [PATCH 149/561] Ignore extra dind container when contaerinMode.type=dind. 
(#2418) --- .github/workflows/e2e-test-linux-vm.yaml | 2 ++ .../v1alpha1/autoscalingrunnerset_types.go | 1 - .../templates/_helpers.tpl | 8 ++++++ .../templates/autoscalingrunnerset.yaml | 2 +- charts/gha-runner-scale-set/values.yaml | 26 +++++++++---------- 5 files changed, 24 insertions(+), 15 deletions(-) diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 621d6962fe..4b47d58861 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -439,7 +439,9 @@ jobs: --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ --set containerMode.type="kubernetes" \ + --set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \ --set containerMode.kubernetesModeWorkVolumeClaim.storageClassName="openebs-hostpath" \ + --set containerMode.kubernetesModeWorkVolumeClaim.resources.requests.storage="1Gi" \ ./charts/gha-runner-scale-set \ --debug echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT diff --git a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go index 35003e592a..05e06fb53b 100644 --- a/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go +++ b/apis/actions.github.com/v1alpha1/autoscalingrunnerset_types.go @@ -248,7 +248,6 @@ type AutoscalingRunnerSetStatus struct { } func (ars *AutoscalingRunnerSet) ListenerSpecHash() string { - type listenerSpec = AutoscalingRunnerSetSpec arsSpec := ars.Spec.DeepCopy() spec := arsSpec return hash.ComputeTemplateHash(&spec) diff --git a/charts/gha-runner-scale-set/templates/_helpers.tpl b/charts/gha-runner-scale-set/templates/_helpers.tpl index 229e5d2abe..babaa4aa82 100644 --- a/charts/gha-runner-scale-set/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set/templates/_helpers.tpl 
@@ -163,6 +163,14 @@ volumeMounts: {{- end }} {{- end }} +{{- define "gha-runner-scale-set.non-runner-non-dind-containers" -}} + {{- range $i, $container := .Values.template.spec.containers }} + {{- if and (ne $container.name "runner") (ne $container.name "dind") }} +- {{ $container | toYaml | nindent 2 }} + {{- end }} + {{- end }} +{{- end }} + {{- define "gha-runner-scale-set.dind-runner-container" -}} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} {{- range $i, $container := .Values.template.spec.containers }} diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index e272291b23..526ad12084 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -111,7 +111,7 @@ spec: {{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }} - name: dind {{- include "gha-runner-scale-set.dind-container" . | nindent 8 }} - {{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }} + {{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }} {{- else if eq .Values.containerMode.type "kubernetes" }} - name: runner {{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . 
| nindent 8 }} diff --git a/charts/gha-runner-scale-set/values.yaml b/charts/gha-runner-scale-set/values.yaml index c9a6d815a7..40fb46611c 100644 --- a/charts/gha-runner-scale-set/values.yaml +++ b/charts/gha-runner-scale-set/values.yaml @@ -86,11 +86,11 @@ template: ## template: ## spec: ## initContainers: - ## - name: initExternalsInternalVolume + ## - name: init-dind-externals ## image: ghcr.io/actions/actions-runner:latest ## command: ["cp", "-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"] ## volumeMounts: - ## - name: externalsInternal + ## - name: dind-externals ## mountPath: /home/runner/tmpDir ## containers: ## - name: runner @@ -103,9 +103,9 @@ template: ## - name: DOCKER_CERT_PATH ## value: /certs/client ## volumeMounts: - ## - name: workingDirectoryInternal + ## - name: work ## mountPath: /home/runner/_work - ## - name: dinDInternal + ## - name: dind-cert ## mountPath: /certs/client ## readOnly: true ## - name: dind @@ -113,18 +113,18 @@ template: ## securityContext: ## privileged: true ## volumeMounts: - ## - mountPath: /certs/client - ## name: dinDInternal - ## - mountPath: /home/runner/_work - ## name: workingDirectoryInternal - ## - mountPath: /home/runner/externals - ## name: externalsInternal + ## - name: work + ## mountPath: /home/runner/_work + ## - name: dind-cert + ## mountPath: /certs/client + ## - name: dind-externals + ## mountPath: /home/runner/externals ## volumes: - ## - name: dinDInternal + ## - name: work ## emptyDir: {} - ## - name: workingDirectoryInternal + ## - name: dind-cert ## emptyDir: {} - ## - name: externalsInternal + ## - name: dind-externals ## emptyDir: {} ###################################################################################################### ## with containerMode.type=kubernetes, we will populate the template.spec with following pod spec From cfbca9b35e536241b35d8a26e6df9025f142c949 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Fri, 17 Mar 2023 
11:39:56 +0100 Subject: [PATCH 150/561] Update 2022-12-05-adding-labels-k8s-resources.md (#2420) --- adrs/2022-12-05-adding-labels-k8s-resources.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/adrs/2022-12-05-adding-labels-k8s-resources.md b/adrs/2022-12-05-adding-labels-k8s-resources.md index 7ce328a264..859e141496 100644 --- a/adrs/2022-12-05-adding-labels-k8s-resources.md +++ b/adrs/2022-12-05-adding-labels-k8s-resources.md @@ -2,7 +2,7 @@ **Date**: 2022-12-05 -**Status**: Deprecated [^1] +**Status**: Superceded [^1] ## Context From 818281606092b134701c0b78ec8a8b8e08c4702c Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 21 Mar 2023 14:11:47 -0400 Subject: [PATCH 151/561] Update e2e workflow (#2430) --- .../execute-assert-arc-e2e/action.yaml | 160 +++++++ .github/actions/setup-arc-e2e/action.yaml | 31 +- .github/workflows/e2e-test-linux-vm.yaml | 441 ++++++------------ 3 files changed, 312 insertions(+), 320 deletions(-) create mode 100644 .github/actions/execute-assert-arc-e2e/action.yaml diff --git a/.github/actions/execute-assert-arc-e2e/action.yaml b/.github/actions/execute-assert-arc-e2e/action.yaml new file mode 100644 index 0000000000..37d9c5853f --- /dev/null +++ b/.github/actions/execute-assert-arc-e2e/action.yaml @@ -0,0 +1,160 @@ +name: 'Execute and Assert ARC E2E Test Action' +description: 'Queue E2E test workflow and assert workflow run result to be succeed' + +inputs: + auth-token: + description: 'GitHub access token to queue workflow run' + required: true + repo-owner: + description: "The repository owner name that has the test workflow file, ex: actions" + required: true + repo-name: + description: "The repository name that has the test workflow file, ex: test" + required: true + workflow-file: + description: 'The file name of the workflow yaml, ex: test.yml' + required: true + arc-name: + description: 'The name of the configured gha-runner-scale-set' + required: true + arc-namespace: + description: 'The namespace of 
the configured gha-runner-scale-set' + required: true + arc-controller-namespace: + description: 'The namespace of the configured gha-runner-scale-set-controller' + required: true + +runs: + using: "composite" + steps: + - name: Queue test workflow + shell: bash + id: queue_workflow + run: | + queue_time=`date +%FT%TZ` + echo "queue_time=$queue_time" >> $GITHUB_OUTPUT + curl -X POST https://api.github.com/repos/${{inputs.repo-owner}}/${{inputs.repo-name}}/actions/workflows/${{inputs.workflow-file}}/dispatches \ + -H "Accept: application/vnd.github.v3+json" \ + -H "Authorization: token ${{inputs.auth-token}}" \ + -d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }' + + - name: Fetch workflow run & job ids + uses: actions/github-script@v6 + id: query_workflow + with: + script: | + // Try to find the workflow run triggered by the previous step using the workflow_dispatch event. + // - Find recently create workflow runs in the test repository + // - For each workflow run, list its workflow job and see if the job's labels contain `inputs.arc-name` + // - Since the inputs.arc-name should be unique per e2e workflow run, once we find the job with the label, we find the workflow that we just triggered. 
+ function sleep(ms) { + return new Promise(resolve => setTimeout(resolve, ms)) + } + const owner = '${{inputs.repo-owner}}' + const repo = '${{inputs.repo-name}}' + const workflow_id = '${{inputs.workflow-file}}' + let workflow_run_id = 0 + let workflow_job_id = 0 + let workflow_run_html_url = "" + let count = 0 + while (count++<12) { + await sleep(10 * 1000); + let listRunResponse = await github.rest.actions.listWorkflowRuns({ + owner: owner, + repo: repo, + workflow_id: workflow_id, + created: '>${{steps.queue_workflow.outputs.queue_time}}' + }) + if (listRunResponse.data.total_count > 0) { + console.log(`Found some new workflow runs for ${workflow_id}`) + for (let i = 0; i 0) { + for (let j = 0; j 0) { + break; + } + } + } + + if (workflow_job_id > 0) { + break; + } + } + if (workflow_job_id == 0) { + core.setFailed(`Can't find workflow run and workflow job triggered to 'runs-on ${{inputs.arc-name}}'`) + } else { + core.setOutput('workflow_run', workflow_run_id); + core.setOutput('workflow_job', workflow_job_id); + core.setOutput('workflow_run_url', workflow_run_html_url); + } + + - name: Generate summary about the triggered workflow run + shell: bash + run: | + cat <<-EOF > $GITHUB_STEP_SUMMARY + | **Triggered workflow run** | + |:--------------------------:| + | ${{steps.query_workflow.outputs.workflow_run_url}} | + EOF + + - name: Wait for workflow to finish successfully + uses: actions/github-script@v6 + with: + script: | + // Wait 5 minutes and make sure the workflow run we triggered completed with result 'success' + function sleep(ms) { + return new Promise(resolve => setTimeout(resolve, ms)) + } + const owner = '${{inputs.repo-owner}}' + const repo = '${{inputs.repo-name}}' + const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}} + const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}} + let count = 0 + while (count++<10) { + await sleep(30 * 1000); + let getRunResponse = await github.rest.actions.getWorkflowRun({ + owner: 
owner, + repo: repo, + run_id: workflow_run_id + }) + console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`); + if (getRunResponse.data.status == 'completed') { + if ( getRunResponse.data.conclusion == 'success') { + console.log(`Workflow run finished properly.`) + return + } else { + core.setFailed(`The triggered workflow run finish with result ${getRunResponse.data.conclusion}`) + return + } + } + } + core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`) + + - name: Gather logs and cleanup + shell: bash + if: always() + run: | + helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug + kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }} + kubectl logs deployment/arc-gha-runner-scale-set-controller -n ${{inputs.arc-controller-namespace}} \ No newline at end of file diff --git a/.github/actions/setup-arc-e2e/action.yaml b/.github/actions/setup-arc-e2e/action.yaml index 922bb9f8c9..b0dcb06d50 100644 --- a/.github/actions/setup-arc-e2e/action.yaml +++ b/.github/actions/setup-arc-e2e/action.yaml @@ -2,21 +2,21 @@ name: 'Setup ARC E2E Test Action' description: 'Build controller image, create kind cluster, load the image, and exchange ARC configure token.' 
inputs: - github-app-id: + app-id: description: 'GitHub App Id for exchange access token' required: true - github-app-pk: + app-pk: description: "GitHub App private key for exchange access token" required: true - github-app-org: - description: 'The organization the GitHub App has installed on' - required: true - docker-image-name: + image-name: description: "Local docker image name for building" required: true - docker-image-tag: + image-tag: description: "Tag of ARC Docker image for building" required: true + target-org: + description: "The test organization for ARC e2e test" + required: true outputs: token: @@ -42,23 +42,22 @@ runs: platforms: linux/amd64 load: true build-args: | - DOCKER_IMAGE_NAME=${{inputs.docker-image-name}} - VERSION=${{inputs.docker-image-tag}} + DOCKER_IMAGE_NAME=${{inputs.image-name}} + VERSION=${{inputs.image-tag}} tags: | - ${{inputs.docker-image-name}}:${{inputs.docker-image-tag}} - cache-from: type=gha - cache-to: type=gha,mode=max + ${{inputs.image-name}}:${{inputs.image-tag}} + no-cache: true - name: Create minikube cluster and load image shell: bash run: | minikube start - minikube image load ${{inputs.docker-image-name}}:${{inputs.docker-image-tag}} + minikube image load ${{inputs.image-name}}:${{inputs.image-tag}} - name: Get configure token id: config-token uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db with: - application_id: ${{ inputs.github-app-id }} - application_private_key: ${{ inputs.github-app-pk }} - organization: ${{ inputs.github-app-org }} \ No newline at end of file + application_id: ${{ inputs.app-id }} + application_private_key: ${{ inputs.app-pk }} + organization: ${{ inputs.target-org}} \ No newline at end of file diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 4b47d58861..9a64405c4c 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -8,52 +8,35 @@ on: 
branches: - master workflow_dispatch: - inputs: - target_org: - description: The org of the test repository. - required: true - default: actions-runner-controller - target_repo: - description: The repository to install the ARC. - required: true - default: arc_e2e_test_dummy + +permissions: + contents: read env: TARGET_ORG: actions-runner-controller TARGET_REPO: arc_e2e_test_dummy IMAGE_NAME: "arc-test-image" IMAGE_VERSION: "dev" - + jobs: default-setup: runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: "arc-test-workflow.yaml" steps: - uses: actions/checkout@v3 - - - name: Resolve inputs - id: resolved_inputs - run: | - TARGET_ORG="${{env.TARGET_ORG}}" - TARGET_REPO="${{env.TARGET_REPO}}" - if [ ! -z "${{inputs.target_org}}" ]; then - TARGET_ORG="${{inputs.target_org}}" - fi - if [ ! -z "${{inputs.target_repo}}" ]; then - TARGET_REPO="${{inputs.target_repo}}" - fi - echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT - echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + with: + ref: ${{github.head_ref}} - uses: ./.github/actions/setup-arc-e2e id: setup with: - github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} - github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} - github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} - docker-image-name: ${{env.IMAGE_NAME}} - docker-image-tag: ${{env.IMAGE_VERSION}} + app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}} + app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}} + image-name: ${{env.IMAGE_NAME}} + image-tag: ${{env.IMAGE_VERSION}} + target-org: ${{env.TARGET_ORG}} - name: Install gha-runner-scale-set-controller id: install_arc_controller @@ -85,11 +68,11 @@ jobs: - name: Install gha-runner-scale-set id: install_arc run: | - ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1)) helm install "$ARC_NAME" \ --namespace "arc-runners" \ --create-namespace 
\ - --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \ --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ ./charts/gha-runner-scale-set \ --debug @@ -109,64 +92,37 @@ jobs: done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME kubectl get pod -n arc-systems - - - name: Test ARC scales pods up and down - id: test - run: | - export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" - export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" - export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}" - go test ./test_e2e_arc -v - - - name: Uninstall gha-runner-scale-set - if: always() && steps.install_arc.outcome == 'success' - run: | - helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners - kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} - - - name: Dump gha-runner-scale-set-controller logs - if: always() && steps.install_arc_controller.outcome == 'success' - run: | - kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems - - - name: Job summary - if: always() && steps.install_arc.outcome == 'success' - run: | - cat <<-EOF > $GITHUB_STEP_SUMMARY - | **Outcome** | ${{ steps.test.outcome }} | - |----------------|--------------------------------------------- | - | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | - EOF + + - name: Test ARC E2E + uses: ./.github/actions/execute-assert-arc-e2e + timeout-minutes: 10 + with: + auth-token: ${{ steps.setup.outputs.token }} + repo-owner: ${{ env.TARGET_ORG }} + repo-name: ${{env.TARGET_REPO}} + 
workflow-file: ${{env.WORKFLOW_FILE}} + arc-name: ${{steps.install_arc.outputs.ARC_NAME}} + arc-namespace: "arc-runners" + arc-controller-namespace: "arc-systems" single-namespace-setup: runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: "arc-test-workflow.yaml" steps: - uses: actions/checkout@v3 - - - name: Resolve inputs - id: resolved_inputs - run: | - TARGET_ORG="${{env.TARGET_ORG}}" - TARGET_REPO="${{env.TARGET_REPO}}" - if [ ! -z "${{inputs.target_org}}" ]; then - TARGET_ORG="${{inputs.target_org}}" - fi - if [ ! -z "${{inputs.target_repo}}" ]; then - TARGET_REPO="${{inputs.target_repo}}" - fi - echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT - echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + with: + ref: ${{github.head_ref}} - uses: ./.github/actions/setup-arc-e2e id: setup with: - github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} - github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} - github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} - docker-image-name: ${{env.IMAGE_NAME}} - docker-image-tag: ${{env.IMAGE_VERSION}} + app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}} + app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}} + image-name: ${{env.IMAGE_NAME}} + image-tag: ${{env.IMAGE_VERSION}} + target-org: ${{env.TARGET_ORG}} - name: Install gha-runner-scale-set-controller id: install_arc_controller @@ -200,11 +156,11 @@ jobs: - name: Install gha-runner-scale-set id: install_arc run: | - ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1)) helm install "$ARC_NAME" \ --namespace "arc-runners" \ --create-namespace \ - --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \ --set 
githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ ./charts/gha-runner-scale-set \ --debug @@ -225,63 +181,36 @@ jobs: kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME kubectl get pod -n arc-systems - - name: Test ARC scales pods up and down - id: test - run: | - export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" - export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" - export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}" - go test ./test_e2e_arc -v - - - name: Uninstall gha-runner-scale-set - if: always() && steps.install_arc.outcome == 'success' - run: | - helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners - kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} - - - name: Dump gha-runner-scale-set-controller logs - if: always() && steps.install_arc_controller.outcome == 'success' - run: | - kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems - - - name: Job summary - if: always() && steps.install_arc.outcome == 'success' - run: | - cat <<-EOF > $GITHUB_STEP_SUMMARY - | **Outcome** | ${{ steps.test.outcome }} | - |----------------|--------------------------------------------- | - | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | - EOF + - name: Test ARC E2E + uses: ./.github/actions/execute-assert-arc-e2e + timeout-minutes: 10 + with: + auth-token: ${{ steps.setup.outputs.token }} + repo-owner: ${{ env.TARGET_ORG }} + repo-name: ${{env.TARGET_REPO}} + workflow-file: ${{env.WORKFLOW_FILE}} + arc-name: ${{steps.install_arc.outputs.ARC_NAME}} + arc-namespace: "arc-runners" + arc-controller-namespace: "arc-systems" dind-mode-setup: runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || 
github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: arc-test-dind-workflow.yaml steps: - uses: actions/checkout@v3 - - - name: Resolve inputs - id: resolved_inputs - run: | - TARGET_ORG="${{env.TARGET_ORG}}" - TARGET_REPO="${{env.TARGET_REPO}}" - if [ ! -z "${{inputs.target_org}}" ]; then - TARGET_ORG="${{inputs.target_org}}" - fi - if [ ! -z "${{inputs.target_repo}}" ]; then - TARGET_REPO="${{inputs.target_repo}}" - fi - echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT - echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + with: + ref: ${{github.head_ref}} - uses: ./.github/actions/setup-arc-e2e id: setup with: - github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} - github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} - github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} - docker-image-name: ${{env.IMAGE_NAME}} - docker-image-tag: ${{env.IMAGE_VERSION}} + app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}} + app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}} + image-name: ${{env.IMAGE_NAME}} + image-tag: ${{env.IMAGE_VERSION}} + target-org: ${{env.TARGET_ORG}} - name: Install gha-runner-scale-set-controller id: install_arc_controller @@ -313,11 +242,11 @@ jobs: - name: Install gha-runner-scale-set id: install_arc run: | - ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1)) helm install "$ARC_NAME" \ --namespace "arc-runners" \ --create-namespace \ - --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \ --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ --set containerMode.type="dind" \ ./charts/gha-runner-scale-set \ @@ -339,67 +268,45 @@ jobs: kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME kubectl 
get pod -n arc-systems - - name: Test ARC scales pods up and down - id: test - run: | - export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" - export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" - export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}" - go test ./test_e2e_arc -v - - - name: Uninstall gha-runner-scale-set - if: always() && steps.install_arc.outcome == 'success' - run: | - helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners - kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} - - - name: Dump gha-runner-scale-set-controller logs - if: always() && steps.install_arc_controller.outcome == 'success' - run: | - kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems - - - name: Job summary - if: always() && steps.install_arc.outcome == 'success' - run: | - cat <<-EOF > $GITHUB_STEP_SUMMARY - | **Outcome** | ${{ steps.test.outcome }} | - |----------------|--------------------------------------------- | - | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | - EOF + - name: Test ARC E2E + uses: ./.github/actions/execute-assert-arc-e2e + timeout-minutes: 10 + with: + auth-token: ${{ steps.setup.outputs.token }} + repo-owner: ${{ env.TARGET_ORG }} + repo-name: ${{env.TARGET_REPO}} + workflow-file: ${{env.WORKFLOW_FILE}} + arc-name: ${{steps.install_arc.outputs.ARC_NAME}} + arc-namespace: "arc-runners" + arc-controller-namespace: "arc-systems" kubernetes-mode-setup: runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml" steps: - uses: actions/checkout@v3 - - - name: Resolve inputs - id: resolved_inputs - run: | - TARGET_ORG="${{env.TARGET_ORG}}" - 
TARGET_REPO="${{env.TARGET_REPO}}" - if [ ! -z "${{inputs.target_org}}" ]; then - TARGET_ORG="${{inputs.target_org}}" - fi - if [ ! -z "${{inputs.target_repo}}" ]; then - TARGET_REPO="${{inputs.target_repo}}" - fi - echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT - echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + with: + ref: ${{github.head_ref}} - uses: ./.github/actions/setup-arc-e2e id: setup with: - github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} - github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} - github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} - docker-image-name: ${{env.IMAGE_NAME}} - docker-image-tag: ${{env.IMAGE_VERSION}} + app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}} + app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}} + image-name: ${{env.IMAGE_NAME}} + image-tag: ${{env.IMAGE_VERSION}} + target-org: ${{env.TARGET_ORG}} - name: Install gha-runner-scale-set-controller id: install_arc_controller run: | + echo "Install openebs/dynamic-localpv-provisioner" + helm repo add openebs https://openebs.github.io/charts + helm repo update + helm install openebs openebs/openebs -n openebs --create-namespace + helm install arc \ --namespace "arc-systems" \ --create-namespace \ @@ -423,20 +330,16 @@ jobs: kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl get pod -n arc-systems kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems + kubectl wait --timeout=30s --for=condition=ready pod -n openebs -l name=openebs-localpv-provisioner - name: Install gha-runner-scale-set id: install_arc run: | - echo "Install openebs/dynamic-localpv-provisioner" - helm repo add openebs https://openebs.github.io/charts - helm repo update - helm install openebs openebs/openebs -n openebs --create-namespace - - ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1)) helm install 
"$ARC_NAME" \ --namespace "arc-runners" \ --create-namespace \ - --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \ --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ --set containerMode.type="kubernetes" \ --set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \ @@ -461,63 +364,36 @@ jobs: kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME kubectl get pod -n arc-systems - - name: Test ARC scales pods up and down - id: test - run: | - export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" - export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" - export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}" - go test ./test_e2e_arc -v - - - name: Uninstall gha-runner-scale-set - if: always() && steps.install_arc.outcome == 'success' - run: | - helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners - kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} - - - name: Dump gha-runner-scale-set-controller logs - if: always() && steps.install_arc_controller.outcome == 'success' - run: | - kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems - - - name: Job summary - if: always() && steps.install_arc.outcome == 'success' - run: | - cat <<-EOF > $GITHUB_STEP_SUMMARY - | **Outcome** | ${{ steps.test.outcome }} | - |----------------|--------------------------------------------- | - | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | - EOF + - name: Test ARC E2E + uses: ./.github/actions/execute-assert-arc-e2e + 
timeout-minutes: 10 + with: + auth-token: ${{ steps.setup.outputs.token }} + repo-owner: ${{ env.TARGET_ORG }} + repo-name: ${{env.TARGET_REPO}} + workflow-file: ${{env.WORKFLOW_FILE}} + arc-name: ${{steps.install_arc.outputs.ARC_NAME}} + arc-namespace: "arc-runners" + arc-controller-namespace: "arc-systems" auth-proxy-setup: runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: "arc-test-workflow.yaml" steps: - uses: actions/checkout@v3 - - - name: Resolve inputs - id: resolved_inputs - run: | - TARGET_ORG="${{env.TARGET_ORG}}" - TARGET_REPO="${{env.TARGET_REPO}}" - if [ ! -z "${{inputs.target_org}}" ]; then - TARGET_ORG="${{inputs.target_org}}" - fi - if [ ! -z "${{inputs.target_repo}}" ]; then - TARGET_REPO="${{inputs.target_repo}}" - fi - echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT - echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + with: + ref: ${{github.head_ref}} - uses: ./.github/actions/setup-arc-e2e id: setup with: - github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} - github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} - github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} - docker-image-name: ${{env.IMAGE_NAME}} - docker-image-tag: ${{env.IMAGE_VERSION}} + app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}} + app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}} + image-name: ${{env.IMAGE_NAME}} + image-tag: ${{env.IMAGE_VERSION}} + target-org: ${{env.TARGET_ORG}} - name: Install gha-runner-scale-set-controller id: install_arc_controller @@ -558,11 +434,11 @@ jobs: --namespace=arc-runners \ --from-literal=username=github \ --from-literal=password='actions' - ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1)) helm install "$ARC_NAME" \ --namespace "arc-runners" \ --create-namespace \ - --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG 
}}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \ --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ --set proxy.https.url="http://host.minikube.internal:3128" \ --set proxy.https.credentialSecretRef="proxy-auth" \ @@ -586,63 +462,36 @@ jobs: kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME kubectl get pod -n arc-systems - - name: Test ARC scales pods up and down - id: test - run: | - export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" - export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" - export WORKFLOW_FILE="${{env.WORKFLOW_FILE}}" - go test ./test_e2e_arc -v - - - name: Uninstall gha-runner-scale-set - if: always() && steps.install_arc.outcome == 'success' - run: | - helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners - kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} - - - name: Dump gha-runner-scale-set-controller logs - if: always() && steps.install_arc_controller.outcome == 'success' - run: | - kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems - - - name: Job summary - if: always() && steps.install_arc.outcome == 'success' - run: | - cat <<-EOF > $GITHUB_STEP_SUMMARY - | **Outcome** | ${{ steps.test.outcome }} | - |----------------|--------------------------------------------- | - | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | - EOF + - name: Test ARC E2E + uses: ./.github/actions/execute-assert-arc-e2e + timeout-minutes: 10 + with: + auth-token: ${{ steps.setup.outputs.token }} + repo-owner: ${{ env.TARGET_ORG }} + repo-name: ${{env.TARGET_REPO}} + workflow-file: 
${{env.WORKFLOW_FILE}} + arc-name: ${{steps.install_arc.outputs.ARC_NAME}} + arc-namespace: "arc-runners" + arc-controller-namespace: "arc-systems" anonymous-proxy-setup: runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: "arc-test-workflow.yaml" steps: - uses: actions/checkout@v3 - - - name: Resolve inputs - id: resolved_inputs - run: | - TARGET_ORG="${{env.TARGET_ORG}}" - TARGET_REPO="${{env.TARGET_REPO}}" - if [ ! -z "${{inputs.target_org}}" ]; then - TARGET_ORG="${{inputs.target_org}}" - fi - if [ ! -z "${{inputs.target_repo}}" ]; then - TARGET_REPO="${{inputs.target_repo}}" - fi - echo "TARGET_ORG=$TARGET_ORG" >> $GITHUB_OUTPUT - echo "TARGET_REPO=$TARGET_REPO" >> $GITHUB_OUTPUT + with: + ref: ${{github.head_ref}} - uses: ./.github/actions/setup-arc-e2e id: setup with: - github-app-id: ${{secrets.ACTIONS_ACCESS_APP_ID}} - github-app-pk: ${{secrets.ACTIONS_ACCESS_PK}} - github-app-org: ${{steps.resolved_inputs.outputs.TARGET_ORG}} - docker-image-name: ${{env.IMAGE_NAME}} - docker-image-tag: ${{env.IMAGE_VERSION}} + app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}} + app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}} + image-name: ${{env.IMAGE_NAME}} + image-tag: ${{env.IMAGE_VERSION}} + target-org: ${{env.TARGET_ORG}} - name: Install gha-runner-scale-set-controller id: install_arc_controller @@ -678,11 +527,11 @@ jobs: --name squid \ --publish 3128:3128 \ ubuntu/squid:latest - ARC_NAME=arc-runner-${{github.job}}-$(date +'%M-%S')-$(($RANDOM % 100 + 1)) + ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1)) helm install "$ARC_NAME" \ --namespace "arc-runners" \ --create-namespace \ - --set githubConfigUrl="https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}" \ + --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \ --set githubConfigSecret.github_token="${{ 
steps.setup.outputs.token }}" \ --set proxy.https.url="http://host.minikube.internal:3128" \ --set "proxy.noProxy[0]=10.96.0.1:443" \ @@ -705,30 +554,14 @@ jobs: kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME kubectl get pod -n arc-systems - - name: Test ARC scales pods up and down - id: test - run: | - export GITHUB_TOKEN="${{ steps.setup.outputs.token }}" - export ARC_NAME="${{ steps.install_arc.outputs.ARC_NAME }}" - export WORKFLOW_FILE="${{ env.WORKFLOW_FILE }}" - go test ./test_e2e_arc -v - - - name: Uninstall gha-runner-scale-set - if: always() && steps.install_arc.outcome == 'success' - run: | - helm uninstall ${{ steps.install_arc.outputs.ARC_NAME }} --namespace arc-runners - kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n demo -l app.kubernetes.io/instance=${{ steps.install_arc.outputs.ARC_NAME }} - - - name: Dump gha-runner-scale-set-controller logs - if: always() && steps.install_arc_controller.outcome == 'success' - run: | - kubectl logs deployment/arc-gha-runner-scale-set-controller -n arc-systems - - - name: Job summary - if: always() && steps.install_arc.outcome == 'success' - run: | - cat <<-EOF > $GITHUB_STEP_SUMMARY - | **Outcome** | ${{ steps.test.outcome }} | - |----------------|--------------------------------------------- | - | **References** | [Test workflow runs](https://github.com/${{ steps.resolved_inputs.outputs.TARGET_ORG }}/${{steps.resolved_inputs.outputs.TARGET_REPO}}/actions/workflows/${{ env.WORKFLOW_FILE }}) | - EOF + - name: Test ARC E2E + uses: ./.github/actions/execute-assert-arc-e2e + timeout-minutes: 10 + with: + auth-token: ${{ steps.setup.outputs.token }} + repo-owner: ${{ env.TARGET_ORG }} + repo-name: ${{env.TARGET_REPO}} + workflow-file: ${{env.WORKFLOW_FILE}} + arc-name: ${{steps.install_arc.outputs.ARC_NAME}} + arc-namespace: "arc-runners" + arc-controller-namespace: "arc-systems" From 91f8fecedce749ee4a7351ed6b66daa73a980e65 Mon Sep 17 
00:00:00 2001 From: Francesco Renzi Date: Thu, 23 Mar 2023 14:39:37 +0000 Subject: [PATCH 152/561] Add new workflows (#2423) --- .github/workflows/go.yaml | 80 ++++++++++++++++++++++++++++ .github/workflows/golangci-lint.yaml | 23 -------- .github/workflows/validate-arc.yaml | 60 --------------------- go.mod | 2 +- 4 files changed, 81 insertions(+), 84 deletions(-) create mode 100644 .github/workflows/go.yaml delete mode 100644 .github/workflows/golangci-lint.yaml delete mode 100644 .github/workflows/validate-arc.yaml diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml new file mode 100644 index 0000000000..153bf8b3e3 --- /dev/null +++ b/.github/workflows/go.yaml @@ -0,0 +1,80 @@ +name: Go +on: + push: + branches: + - master + paths: + - '.github/workflows/go.yaml' + - '**.go' + - 'go.mod' + - 'go.sum' + + pull_request: + paths: + - '.github/workflows/go.yaml' + - '**.go' + - 'go.mod' + - 'go.sum' + +permissions: + contents: read + +jobs: + fmt: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version-file: 'go.mod' + cache: false + - name: fmt + run: go fmt ./... 
+ - name: Check diff + run: git diff --exit-code + + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version-file: 'go.mod' + cache: false + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + only-new-issues: true + version: v1.51.1 + + generate: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version-file: 'go.mod' + cache: false + - name: Generate + run: make generate + - name: Check diff + run: git diff --exit-code + + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version-file: 'go.mod' + - run: make manifests + - name: Check diff + run: git diff --exit-code + - name: Install kubebuilder + run: | + curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz + tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz + sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder + - name: Run go tests + run: | + go test -short `go list ./... 
| grep -v ./test_e2e_arc` diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml deleted file mode 100644 index 13e5fb55d1..0000000000 --- a/.github/workflows/golangci-lint.yaml +++ /dev/null @@ -1,23 +0,0 @@ -name: golangci-lint -on: - push: - branches: - - master - pull_request: -permissions: - contents: read - pull-requests: read -jobs: - golangci: - name: lint - runs-on: ubuntu-latest - steps: - - uses: actions/setup-go@v3 - with: - go-version: 1.19 - - uses: actions/checkout@v3 - - name: golangci-lint - uses: golangci/golangci-lint-action@v3 - with: - only-new-issues: true - version: v1.51.1 diff --git a/.github/workflows/validate-arc.yaml b/.github/workflows/validate-arc.yaml deleted file mode 100644 index e487f5fb1a..0000000000 --- a/.github/workflows/validate-arc.yaml +++ /dev/null @@ -1,60 +0,0 @@ -name: Validate ARC - -on: - pull_request: - branches: - - master - paths-ignore: - - '**.md' - - '.github/ISSUE_TEMPLATE/**' - - '.github/workflows/publish-canary.yaml' - - '.github/workflows/validate-chart.yaml' - - '.github/workflows/publish-chart.yaml' - - '.github/workflows/runners.yaml' - - '.github/workflows/publish-arc.yaml' - - '.github/workflows/validate-entrypoint.yaml' - - '.github/renovate.*' - - 'runner/**' - - '.gitignore' - - 'PROJECT' - - 'LICENSE' - - 'Makefile' - -permissions: - contents: read - -jobs: - test-controller: - name: Test ARC - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Set-up Go - uses: actions/setup-go@v3 - with: - go-version: '1.19' - check-latest: false - - - uses: actions/cache@v3 - with: - path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - - name: Install kubebuilder - run: | - curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz - tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz - sudo mv 
kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder - - - name: Run tests - run: | - make test - - - name: Verify manifests are up-to-date - run: | - make manifests - git diff --exit-code diff --git a/go.mod b/go.mod index 90f6c0e072..c5a759eb54 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/actions/actions-runner-controller -go 1.19 +go 1.20 require ( github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 From 9b616ca83b5d34af71de48cc67b18afb058fe390 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Thu, 23 Mar 2023 16:40:58 +0100 Subject: [PATCH 153/561] Enhance quickstart troubleshooting guidelines (#2435) --- .../gha-runner-scale-set-controller/README.md | 95 ++++++++----------- 1 file changed, 39 insertions(+), 56 deletions(-) diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index c1aa774685..31ed62cd1f 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -1,7 +1,5 @@ # Autoscaling Runner Scale Sets mode -**⚠️ This mode is currently only available for a limited number of organizations.** - This new autoscaling mode brings numerous enhancements (described in the following sections) that will make your experience more reliable and secure. 
## How it works @@ -157,8 +155,10 @@ You can check the logs of the controller pod using the following command: ```bash # Controller logs -$ kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-runner-scale-set-controller +kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-runner-scale-set-controller +``` +```bash # Runner set listener logs kubectl logs -n "${NAMESPACE}" -l auto-scaling-runner-set-namespace=arc-systems -l auto-scaling-runner-set-name=arc-runner-set ``` @@ -181,6 +181,42 @@ Error: INSTALLATION FAILED: execution error at (gha-runner-scale-set/templates/a Verify that the secret you provided is correct and that the `githubConfigUrl` you provided is accurate. +### Access to the path `/home/runner/_work/_tool` is denied error + +You might see this error if you're using kubernetes mode with persistent volumes. This is because the runner container is running with a non-root user and is causing a permissions mismatch with the mounted volume. + +To fix this, you can either: + +1. Use a volume type that supports `securityContext.fsGroup` (`hostPath` volumes don't support it, `local` volumes do as well as other types). Update the `fsGroup` of your runner pod to match the GID of the runner. You can do that by updating the `gha-runner-scale-set` helm chart values to include the following: + + ```yaml + spec: + securityContext: + fsGroup: 123 + containers: + - name: runner + image: ghcr.io/actions/actions-runner: # Replace with the version you want to use + command: ["/home/runner/run.sh"] + ``` + +1. 
If updating the `securityContext` of your runner pod is not a viable solution, you can workaround the issue by using `initContainers` to change the mounted volume's ownership, as follows: + + ```yaml + template: + spec: + initContainers: + - name: kube-init + image: ghcr.io/actions/actions-runner:latest + command: ["sudo", "chown", "-R", "1001:123", "/home/runner/_work"] + volumeMounts: + - name: work + mountPath: /home/runner/_work + containers: + - name: runner + image: ghcr.io/actions/actions-runner:latest + command: ["/home/runner/run.sh"] + ``` + ## Changelog ### v0.3.0 @@ -207,56 +243,3 @@ Verify that the secret you provided is correct and that the `githubConfigUrl` yo 1. Fixed a bug that was preventing runner scale from being removed from the backend when they were deleted from the cluster [#2255](https://github.com/actions/actions-runner-controller/pull/2255) [#2223](https://github.com/actions/actions-runner-controller/pull/2223) 1. Fixed bugs with the helm chart definitions preventing certain values from being set [#2222](https://github.com/actions/actions-runner-controller/pull/2222) 1. 
Fixed a bug that prevented the configuration of a runner group for a runner scale set [#2216](https://github.com/actions/actions-runner-controller/pull/2216) - -#### Log - -- [1c7b7f4](https://github.com/actions/actions-runner-controller/commit/1c7b7f4) Bump arc-2 chart version and prepare 0.2.0 release [#2313](https://github.com/actions/actions-runner-controller/pull/2313) -- [73e22a1](https://github.com/actions/actions-runner-controller/commit/73e22a1) Disable metrics serving in proxy tests [#2307](https://github.com/actions/actions-runner-controller/pull/2307) -- [9b44f00](https://github.com/actions/actions-runner-controller/commit/9b44f00) Documentation corrections [#2116](https://github.com/actions/actions-runner-controller/pull/2116) -- [6b4250c](https://github.com/actions/actions-runner-controller/commit/6b4250c) Add support for proxy [#2286](https://github.com/actions/actions-runner-controller/pull/2286) -- [ced8822](https://github.com/actions/actions-runner-controller/commit/ced8822) Resolves the erroneous webhook scale down due to check runs [#2119](https://github.com/actions/actions-runner-controller/pull/2119) -- [44c06c2](https://github.com/actions/actions-runner-controller/commit/44c06c2) fix: case-insensitive webhook label matching [#2302](https://github.com/actions/actions-runner-controller/pull/2302) -- [4103fe3](https://github.com/actions/actions-runner-controller/commit/4103fe3) Use DOCKER_IMAGE_NAME instead of NAME to avoid conflict. 
[#2303](https://github.com/actions/actions-runner-controller/pull/2303) -- [a44fe04](https://github.com/actions/actions-runner-controller/commit/a44fe04) Fix manager crashloopback for ARC deployments without scaleset-related controllers [#2293](https://github.com/actions/actions-runner-controller/pull/2293) -- [274d0c8](https://github.com/actions/actions-runner-controller/commit/274d0c8) Added ability to configure log level from chart values [#2252](https://github.com/actions/actions-runner-controller/pull/2252) -- [256e08e](https://github.com/actions/actions-runner-controller/commit/256e08e) Ask runner to wait for docker daemon from DinD. [#2292](https://github.com/actions/actions-runner-controller/pull/2292) -- [f677fd5](https://github.com/actions/actions-runner-controller/commit/f677fd5) doc: Fix chart name for helm commands in docs [#2287](https://github.com/actions/actions-runner-controller/pull/2287) -- [d962714](https://github.com/actions/actions-runner-controller/commit/d962714) Fix helm chart when containerMode.type=dind. 
[#2291](https://github.com/actions/actions-runner-controller/pull/2291) -- [3886f28](https://github.com/actions/actions-runner-controller/commit/3886f28) Add EKS test environment Terraform templates [#2290](https://github.com/actions/actions-runner-controller/pull/2290) -- [dab9004](https://github.com/actions/actions-runner-controller/commit/dab9004) Added workflow to be triggered via rest api dispatch in e2e test [#2283](https://github.com/actions/actions-runner-controller/pull/2283) -- [dd8ec1a](https://github.com/actions/actions-runner-controller/commit/dd8ec1a) Add testserver package [#2281](https://github.com/actions/actions-runner-controller/pull/2281) -- [8e52a6d](https://github.com/actions/actions-runner-controller/commit/8e52a6d) EphemeralRunner: On cleanup, if pod is pending, delete from service [#2255](https://github.com/actions/actions-runner-controller/pull/2255) -- [9990243](https://github.com/actions/actions-runner-controller/commit/9990243) Early return if finalizer does not exist to make it more readable [#2262](https://github.com/actions/actions-runner-controller/pull/2262) -- [0891981](https://github.com/actions/actions-runner-controller/commit/0891981) Port ADRs from internal repo [#2267](https://github.com/actions/actions-runner-controller/pull/2267) -- [facae69](https://github.com/actions/actions-runner-controller/commit/facae69) Remove un-required permissions for the manager-role of the new `AutoScalingRunnerSet` [#2260](https://github.com/actions/actions-runner-controller/pull/2260) -- [8f62e35](https://github.com/actions/actions-runner-controller/commit/8f62e35) Add options to multi client [#2257](https://github.com/actions/actions-runner-controller/pull/2257) -- [55951c2](https://github.com/actions/actions-runner-controller/commit/55951c2) Add new workflow to automate runner updates [#2247](https://github.com/actions/actions-runner-controller/pull/2247) -- [c4297d2](https://github.com/actions/actions-runner-controller/commit/c4297d2) Avoid 
deleting scale set if annotation is not parsable or if it does not exist [#2239](https://github.com/actions/actions-runner-controller/pull/2239) -- [0774f06](https://github.com/actions/actions-runner-controller/commit/0774f06) ADR: automate runner updates [#2244](https://github.com/actions/actions-runner-controller/pull/2244) -- [92ab11b](https://github.com/actions/actions-runner-controller/commit/92ab11b) Use UUID v5 for client identifiers [#2241](https://github.com/actions/actions-runner-controller/pull/2241) -- [7414dc6](https://github.com/actions/actions-runner-controller/commit/7414dc6) Add Identifier to actions.Client [#2237](https://github.com/actions/actions-runner-controller/pull/2237) -- [34efb9d](https://github.com/actions/actions-runner-controller/commit/34efb9d) Add documentation to update ARC with prometheus CRDs needed by actions metrics server [#2209](https://github.com/actions/actions-runner-controller/pull/2209) -- [fbad561](https://github.com/actions/actions-runner-controller/commit/fbad561) Allow provide pre-defined kubernetes secret when helm-install AutoScalingRunnerSet [#2234](https://github.com/actions/actions-runner-controller/pull/2234) -- [a5cef7e](https://github.com/actions/actions-runner-controller/commit/a5cef7e) Resolve CI break due to bad merge. [#2236](https://github.com/actions/actions-runner-controller/pull/2236) -- [1f4fe46](https://github.com/actions/actions-runner-controller/commit/1f4fe46) Delete RunnerScaleSet on service when AutoScalingRunnerSet is deleted. 
[#2223](https://github.com/actions/actions-runner-controller/pull/2223) -- [067686c](https://github.com/actions/actions-runner-controller/commit/067686c) Fix typos and markdown structure in troubleshooting guide [#2148](https://github.com/actions/actions-runner-controller/pull/2148) -- [df12e00](https://github.com/actions/actions-runner-controller/commit/df12e00) Remove network requests from actions.NewClient [#2219](https://github.com/actions/actions-runner-controller/pull/2219) -- [cc26593](https://github.com/actions/actions-runner-controller/commit/cc26593) Skip CT when list-changed=false. [#2228](https://github.com/actions/actions-runner-controller/pull/2228) -- [835eac7](https://github.com/actions/actions-runner-controller/commit/835eac7) Fix helm charts when pass values file. [#2222](https://github.com/actions/actions-runner-controller/pull/2222) -- [01e9dd3](https://github.com/actions/actions-runner-controller/commit/01e9dd3) Update Validate ARC workflow to go 1.19 [#2220](https://github.com/actions/actions-runner-controller/pull/2220) -- [8038181](https://github.com/actions/actions-runner-controller/commit/8038181) Allow update runner group for AutoScalingRunnerSet [#2216](https://github.com/actions/actions-runner-controller/pull/2216) -- [219ba5b](https://github.com/actions/actions-runner-controller/commit/219ba5b) chore(deps): bump sigs.k8s.io/controller-runtime from 0.13.1 to 0.14.1 [#2132](https://github.com/actions/actions-runner-controller/pull/2132) -- [b09e3a2](https://github.com/actions/actions-runner-controller/commit/b09e3a2) Return error for non-existing runner group. 
[#2215](https://github.com/actions/actions-runner-controller/pull/2215) -- [7ea60e4](https://github.com/actions/actions-runner-controller/commit/7ea60e4) Fix intermittent image push failures to GHCR [#2214](https://github.com/actions/actions-runner-controller/pull/2214) -- [c8918f5](https://github.com/actions/actions-runner-controller/commit/c8918f5) Fix URL for authenticating using a GitHub app [#2206](https://github.com/actions/actions-runner-controller/pull/2206) -- [d57d17f](https://github.com/actions/actions-runner-controller/commit/d57d17f) Add support for custom CA in actions.Client [#2199](https://github.com/actions/actions-runner-controller/pull/2199) -- [6e69c75](https://github.com/actions/actions-runner-controller/commit/6e69c75) chore(deps): bump github.com/hashicorp/go-retryablehttp from 0.7.1 to 0.7.2 [#2203](https://github.com/actions/actions-runner-controller/pull/2203) -- [882bfab](https://github.com/actions/actions-runner-controller/commit/882bfab) Renaming autoScaling to autoscaling in tests matching the convention [#2201](https://github.com/actions/actions-runner-controller/pull/2201) -- [3327f62](https://github.com/actions/actions-runner-controller/commit/3327f62) Refactor actions.Client with options to help extensibility [#2193](https://github.com/actions/actions-runner-controller/pull/2193) -- [282f2dd](https://github.com/actions/actions-runner-controller/commit/282f2dd) chore(deps): bump github.com/onsi/gomega from 1.20.2 to 1.25.0 [#2169](https://github.com/actions/actions-runner-controller/pull/2169) -- [d67f808](https://github.com/actions/actions-runner-controller/commit/d67f808) Include nikola-jokic in CODEOWNERS file [#2184](https://github.com/actions/actions-runner-controller/pull/2184) -- [4932412](https://github.com/actions/actions-runner-controller/commit/4932412) Fix L0 test to make it more reliable. 
[#2178](https://github.com/actions/actions-runner-controller/pull/2178) -- [6da1cde](https://github.com/actions/actions-runner-controller/commit/6da1cde) Update runner version to 2.301.1 [#2182](https://github.com/actions/actions-runner-controller/pull/2182) -- [f9bae70](https://github.com/actions/actions-runner-controller/commit/f9bae70) Add distinct namespace best practice note [#2181](https://github.com/actions/actions-runner-controller/pull/2181) -- [05a3908](https://github.com/actions/actions-runner-controller/commit/05a3908) Add arc-2 quickstart guide [#2180](https://github.com/actions/actions-runner-controller/pull/2180) -- [606ed1b](https://github.com/actions/actions-runner-controller/commit/606ed1b) Add Repository information to Runner Status [#2093](https://github.com/actions/actions-runner-controller/pull/2093) From e584ab4e8eddb97b23ef3d6c51ef2d546d781cca Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Fri, 24 Mar 2023 12:11:57 +0100 Subject: [PATCH 154/561] Delete e2e-test-dispatch-workflow.yaml (#2441) --- .../workflows/e2e-test-dispatch-workflow.yaml | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 .github/workflows/e2e-test-dispatch-workflow.yaml diff --git a/.github/workflows/e2e-test-dispatch-workflow.yaml b/.github/workflows/e2e-test-dispatch-workflow.yaml deleted file mode 100644 index baf34b503e..0000000000 --- a/.github/workflows/e2e-test-dispatch-workflow.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: ARC Reusable Workflow -on: - workflow_dispatch: - inputs: - date_time: - description: 'Datetime for runner name uniqueness, format: %Y-%m-%d-%H-%M-%S-%3N, example: 2023-02-14-13-00-16-791' - required: true -jobs: - arc-runner-job: - strategy: - fail-fast: false - matrix: - job: [1, 2, 3] - runs-on: arc-runner-${{ inputs.date_time }} - steps: - - run: echo "Hello World!" 
>> $GITHUB_STEP_SUMMARY From 9b56100f8e9337ea33d6a7a4a0a84fda41bc5ecd Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Mon, 27 Mar 2023 11:19:34 +0200 Subject: [PATCH 155/561] Add labels to autoscaling runner set subresources to allow easier inspection (#2391) Co-authored-by: Tingluo Huang --- .github/workflows/e2e-test-linux-vm.yaml | 38 ++--- .../templates/_helpers.tpl | 1 + .../templates/autoscalingrunnerset.yaml | 1 + .../tests/template_test.go | 4 + .../autoscalinglistener_controller.go | 4 +- .../autoscalingrunnerset_controller.go | 35 ++-- .../autoscalingrunnerset_controller_test.go | 40 +++-- .../ephemeralrunnerset_controller.go | 7 +- .../actions.github.com/resourcebuilder.go | 160 ++++++++++++++---- .../resourcebuilder_test.go | 93 ++++++++++ .../gha-runner-scale-set-controller/README.md | 2 +- 11 files changed, 302 insertions(+), 83 deletions(-) create mode 100644 controllers/actions.github.com/resourcebuilder_test.go diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 9a64405c4c..a054149bc8 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -79,20 +79,20 @@ jobs: echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT count=0 while true; do - POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name) if [ -n "$POD_NAME" ]; then echo "Pod found: $POD_NAME" break fi if [ "$count" -ge 10 ]; then - echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 done - kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME 
kubectl get pod -n arc-systems - + - name: Test ARC E2E uses: ./.github/actions/execute-assert-arc-e2e timeout-minutes: 10 @@ -167,18 +167,18 @@ jobs: echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT count=0 while true; do - POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name) if [ -n "$POD_NAME" ]; then echo "Pod found: $POD_NAME" break fi if [ "$count" -ge 10 ]; then - echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 done - kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems - name: Test ARC E2E @@ -254,18 +254,18 @@ jobs: echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT count=0 while true; do - POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name) if [ -n "$POD_NAME" ]; then echo "Pod found: $POD_NAME" break fi if [ "$count" -ge 10 ]; then - echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 done - kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems - name: Test ARC E2E @@ -350,18 +350,18 @@ jobs: echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT count=0 while true; do - POD_NAME=$(kubectl 
get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name) if [ -n "$POD_NAME" ]; then echo "Pod found: $POD_NAME" break fi if [ "$count" -ge 10 ]; then - echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 done - kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems - name: Test ARC E2E @@ -448,18 +448,18 @@ jobs: echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT count=0 while true; do - POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name) if [ -n "$POD_NAME" ]; then echo "Pod found: $POD_NAME" break fi if [ "$count" -ge 10 ]; then - echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 done - kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems - name: Test ARC E2E @@ -540,18 +540,18 @@ jobs: echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT count=0 while true; do - POD_NAME=$(kubectl get pods -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME -o name) + POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name) if [ -n "$POD_NAME" ]; then echo "Pod found: $POD_NAME" break fi 
if [ "$count" -ge 10 ]; then - echo "Timeout waiting for listener pod with label auto-scaling-runner-set-name=$ARC_NAME" + echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 done - kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l auto-scaling-runner-set-name=$ARC_NAME + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems - name: Test ARC E2E diff --git a/charts/gha-runner-scale-set/templates/_helpers.tpl b/charts/gha-runner-scale-set/templates/_helpers.tpl index babaa4aa82..45e7794535 100644 --- a/charts/gha-runner-scale-set/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set/templates/_helpers.tpl @@ -40,6 +40,7 @@ helm.sh/chart: {{ include "gha-runner-scale-set.chart" . }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/part-of: gha-runner-scale-set {{- end }} {{/* diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index 526ad12084..455a7d0b33 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -10,6 +10,7 @@ metadata: name: {{ .Release.Name }} namespace: {{ .Release.Namespace }} labels: + app.kubernetes.io/component: "autoscaling-runner-set" {{- include "gha-runner-scale-set.labels" . 
| nindent 4 }} spec: githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }} diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index c5960ad34c..9c3692eed7 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -311,6 +311,10 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) { assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) + assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/part-of"]) + assert.Equal(t, "autoscaling-runner-set", ars.Labels["app.kubernetes.io/component"]) + assert.NotEmpty(t, ars.Labels["app.kubernetes.io/version"]) + assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret) diff --git a/controllers/actions.github.com/autoscalinglistener_controller.go b/controllers/actions.github.com/autoscalinglistener_controller.go index dec923345a..5509946c85 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller.go +++ b/controllers/actions.github.com/autoscalinglistener_controller.go @@ -523,8 +523,8 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingListener.Namespace, Labels: map[string]string{ - "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - "auto-scaling-runner-set-name": autoscalingListener.Spec.AutoscalingRunnerSetName, + LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, }, }, Data: data, diff --git 
a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index b833e57d7c..43c13823b1 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -45,9 +45,8 @@ const ( autoscalingRunnerSetOwnerKey = ".metadata.controller" LabelKeyRunnerSpecHash = "runner-spec-hash" autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" - runnerScaleSetIdKey = "runner-scale-set-id" - runnerScaleSetNameKey = "runner-scale-set-name" - runnerScaleSetRunnerGroupNameKey = "runner-scale-set-runner-group-name" + runnerScaleSetIdAnnotationKey = "runner-scale-set-id" + runnerScaleSetNameAnnotationKey = "runner-scale-set-name" ) // AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object @@ -140,7 +139,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, nil } - scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdKey] + scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey] if !ok { // Need to create a new runner scale set on Actions service log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.") @@ -154,14 +153,14 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl } // Make sure the runner group of the scale set is up to date - currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetRunnerGroupNameKey] + currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName] if !ok || (len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 && !strings.EqualFold(currentRunnerGroupName, autoscalingRunnerSet.Spec.RunnerGroup)) { log.Info("AutoScalingRunnerSet runner group changed. 
Updating the runner scale set.") return r.updateRunnerScaleSetRunnerGroup(ctx, autoscalingRunnerSet, log) } // Make sure the runner scale set name is up to date - currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameKey] + currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameAnnotationKey] if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) { log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.") return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log) @@ -365,12 +364,18 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex if autoscalingRunnerSet.Annotations == nil { autoscalingRunnerSet.Annotations = map[string]string{} } + if autoscalingRunnerSet.Labels == nil { + autoscalingRunnerSet.Labels = map[string]string{} + } - logger.Info("Adding runner scale set ID, name and runner group name as an annotation") + logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels") if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { - obj.Annotations[runnerScaleSetNameKey] = runnerScaleSet.Name - obj.Annotations[runnerScaleSetIdKey] = strconv.Itoa(runnerScaleSet.Id) - obj.Annotations[runnerScaleSetRunnerGroupNameKey] = runnerScaleSet.RunnerGroupName + obj.Annotations[runnerScaleSetNameAnnotationKey] = runnerScaleSet.Name + obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id) + obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName + if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen + logger.Error(err, "Failed to apply GitHub URL labels") + } }); err != nil { logger.Error(err, "Failed to add runner scale set ID, name and runner group name 
as an annotation") return ctrl.Result{}, err @@ -384,7 +389,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex } func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { - runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) + runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) if err != nil { logger.Error(err, "Failed to parse runner scale set ID") return ctrl.Result{}, err @@ -415,7 +420,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con logger.Info("Updating runner scale set runner group name as an annotation") if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { - obj.Annotations[runnerScaleSetRunnerGroupNameKey] = updatedRunnerScaleSet.RunnerGroupName + obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = updatedRunnerScaleSet.RunnerGroupName }); err != nil { logger.Error(err, "Failed to update runner group name annotation") return ctrl.Result{}, err @@ -426,7 +431,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con } func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { - runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) + runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) if err != nil { logger.Error(err, "Failed to parse runner scale set ID") return ctrl.Result{}, err @@ -451,7 +456,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co logger.Info("Updating runner scale set name as an annotation") if err := patch(ctx, r.Client, 
autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { - obj.Annotations[runnerScaleSetNameKey] = updatedRunnerScaleSet.Name + obj.Annotations[runnerScaleSetNameAnnotationKey] = updatedRunnerScaleSet.Name }); err != nil { logger.Error(err, "Failed to update runner scale set name annotation") return ctrl.Result{}, err @@ -463,7 +468,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error { logger.Info("Deleting the runner scale set from Actions service") - runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) + runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) if err != nil { // If the annotation is not set correctly, or if it does not exist, we are going to get stuck in a loop trying to parse the scale set id. // If the configuration is invalid (secret does not exist for example), we never get to the point to create runner set. 
But then, manual cleanup diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index 2a5fd7803c..2fd0e61b00 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -117,19 +117,39 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { return "", err } - if _, ok := created.Annotations[runnerScaleSetIdKey]; !ok { + if _, ok := created.Annotations[runnerScaleSetIdAnnotationKey]; !ok { return "", nil } - if _, ok := created.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok { + if _, ok := created.Annotations[AnnotationKeyGitHubRunnerGroupName]; !ok { return "", nil } - return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIdKey], created.Annotations[runnerScaleSetRunnerGroupNameKey]), nil + return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIdAnnotationKey], created.Annotations[AnnotationKeyGitHubRunnerGroupName]), nil }, autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("1_testgroup"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation") + Eventually( + func() (string, error) { + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created) + if err != nil { + return "", err + } + + if _, ok := created.Labels[LabelKeyGitHubOrganization]; !ok { + return "", nil + } + + if _, ok := created.Labels[LabelKeyGitHubRepository]; !ok { + return "", nil + } + + return fmt.Sprintf("%s/%s", created.Labels[LabelKeyGitHubOrganization], created.Labels[LabelKeyGitHubRepository]), nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("owner/repo"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's label") + // Check if 
ephemeral runner set is created Eventually( func() (int, error) { @@ -351,18 +371,18 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { return "", err } - if _, ok := updated.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok { + if _, ok := updated.Annotations[AnnotationKeyGitHubRunnerGroupName]; !ok { return "", nil } - return updated.Annotations[runnerScaleSetRunnerGroupNameKey], nil + return updated.Annotations[AnnotationKeyGitHubRunnerGroupName], nil }, autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the new runner group in its annotation") // delete the annotation and it should be re-added patched = autoscalingRunnerSet.DeepCopy() - delete(patched.Annotations, runnerScaleSetRunnerGroupNameKey) + delete(patched.Annotations, AnnotationKeyGitHubRunnerGroupName) err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") @@ -374,11 +394,11 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { return "", err } - if _, ok := updated.Annotations[runnerScaleSetRunnerGroupNameKey]; !ok { + if _, ok := updated.Annotations[AnnotationKeyGitHubRunnerGroupName]; !ok { return "", nil } - return updated.Annotations[runnerScaleSetRunnerGroupNameKey], nil + return updated.Annotations[AnnotationKeyGitHubRunnerGroupName], nil }, autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestInterval, @@ -539,7 +559,7 @@ var _ = Describe("Test AutoScalingController updates", func() { return "", err } - if val, ok := ars.Annotations[runnerScaleSetNameKey]; ok { + if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok { return val, nil } @@ -562,7 +582,7 @@ var _ = Describe("Test AutoScalingController updates", func() { return "", err } - if val, ok := ars.Annotations[runnerScaleSetNameKey]; ok { + if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok { 
return val, nil } diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index 27a8a22704..6a90ec78af 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -356,10 +356,9 @@ func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ep ObjectMeta: metav1.ObjectMeta{ Name: proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet), Namespace: ephemeralRunnerSet.Namespace, - Labels: map[string]string{ - // TODO: figure out autoScalingRunnerSet name and set it as a label for this secret - // "auto-scaling-runner-set-namespace": ephemeralRunnerSet.Namespace, - // "auto-scaling-runner-set-name": ephemeralRunnerSet.Name, + Labels: map[string]string{ + LabelKeyGitHubScaleSetName: ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName], + LabelKeyGitHubScaleSetNamespace: ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace], }, }, Data: proxySecretData, diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index dd555289f5..4ca6abc695 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -8,6 +8,7 @@ import ( "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/build" + "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/hash" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -19,12 +20,42 @@ const ( jitTokenKey = "jitToken" ) -// labels applied to resources +// Labels applied to resources const ( - LabelKeyAutoScaleRunnerSetName = "auto-scaling-runner-set-name" - LabelKeyAutoScaleRunnerSetNamespace = "auto-scaling-runner-set-namespace" + // Kubernetes labels + LabelKeyKubernetesPartOf = 
"app.kubernetes.io/part-of" + LabelKeyKubernetesComponent = "app.kubernetes.io/component" + LabelKeyKubernetesVersion = "app.kubernetes.io/version" + + // Github labels + LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name" + LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace" + LabelKeyGitHubEnterprise = "actions.github.com/enterprise" + LabelKeyGitHubOrganization = "actions.github.com/organization" + LabelKeyGitHubRepository = "actions.github.com/repository" ) +const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name" + +// Labels applied to listener roles +const ( + labelKeyListenerName = "auto-scaling-listener-name" + labelKeyListenerNamespace = "auto-scaling-listener-namespace" +) + +var commonLabelKeys = [...]string{ + LabelKeyKubernetesPartOf, + LabelKeyKubernetesComponent, + LabelKeyKubernetesVersion, + LabelKeyGitHubScaleSetName, + LabelKeyGitHubScaleSetNamespace, + LabelKeyGitHubEnterprise, + LabelKeyGitHubOrganization, + LabelKeyGitHubRepository, +} + +const labelValueKubernetesPartOf = "gha-runner-scale-set" + type resourceBuilder struct{} func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod { @@ -129,6 +160,11 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A RestartPolicy: corev1.RestartPolicyNever, } + labels := make(map[string]string, len(autoscalingListener.Labels)) + for key, val := range autoscalingListener.Labels { + labels[key] = val + } + newRunnerScaleSetListenerPod := &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -137,10 +173,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A ObjectMeta: metav1.ObjectMeta{ Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace, - Labels: map[string]string{ - LabelKeyAutoScaleRunnerSetNamespace: 
autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, - }, + Labels: labels, }, Spec: podSpec, } @@ -149,14 +182,28 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A } func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) { - runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) + runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) if err != nil { return nil, err } runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() - newLabels := map[string]string{} - newLabels[LabelKeyRunnerSpecHash] = runnerSpecHash + newLabels := map[string]string{ + LabelKeyRunnerSpecHash: runnerSpecHash, + LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, + LabelKeyKubernetesComponent: "runner-set", + LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], + LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name, + LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, + } + + if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil { + return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err) + } + + newAnnotations := map[string]string{ + AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], + } newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{ TypeMeta: metav1.TypeMeta{}, @@ -164,6 +211,7 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-", Namespace: autoscalingRunnerSet.ObjectMeta.Namespace, Labels: newLabels, + Annotations: newAnnotations, }, Spec: v1alpha1.EphemeralRunnerSetSpec{ Replicas: 0, @@ -187,8 +235,8 @@ func (b *resourceBuilder) 
newScaleSetListenerServiceAccount(autoscalingListener Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace, Labels: map[string]string{ - LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, + LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, }, }, } @@ -202,11 +250,11 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1. Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Labels: map[string]string{ - LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, - "auto-scaling-listener-namespace": autoscalingListener.Namespace, - "auto-scaling-listener-name": autoscalingListener.Name, - "role-policy-rules-hash": rulesHash, + LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, + labelKeyListenerNamespace: autoscalingListener.Namespace, + labelKeyListenerName: autoscalingListener.Name, + "role-policy-rules-hash": rulesHash, }, }, Rules: rules, @@ -236,12 +284,12 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1 Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Labels: map[string]string{ - LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, - "auto-scaling-listener-namespace": autoscalingListener.Namespace, - 
"auto-scaling-listener-name": autoscalingListener.Name, - "role-binding-role-ref-hash": roleRefHash, - "role-binding-subject-hash": subjectHash, + LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, + labelKeyListenerNamespace: autoscalingListener.Namespace, + labelKeyListenerName: autoscalingListener.Name, + "role-binding-role-ref-hash": roleRefHash, + "role-binding-subject-hash": subjectHash, }, }, RoleRef: roleRef, @@ -259,9 +307,9 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v Name: scaleSetListenerSecretMirrorName(autoscalingListener), Namespace: autoscalingListener.Namespace, Labels: map[string]string{ - LabelKeyAutoScaleRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - LabelKeyAutoScaleRunnerSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, - "secret-data-hash": dataHash, + LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, + LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, + "secret-data-hash": dataHash, }, }, Data: secret.DeepCopy().Data, @@ -271,7 +319,7 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v } func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) { - runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]) + runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) if err != nil { return nil, err } @@ -285,14 +333,25 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1. 
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners } + githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl) + if err != nil { + return nil, fmt.Errorf("failed to parse github config from url: %v", err) + } + autoscalingListener := &v1alpha1.AutoscalingListener{ ObjectMeta: metav1.ObjectMeta{ Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: namespace, Labels: map[string]string{ - LabelKeyAutoScaleRunnerSetNamespace: autoscalingRunnerSet.Namespace, - LabelKeyAutoScaleRunnerSetName: autoscalingRunnerSet.Name, - LabelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), + LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, + LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name, + LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, + LabelKeyKubernetesComponent: "runner-scale-set-listener", + LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], + LabelKeyGitHubEnterprise: githubConfig.Enterprise, + LabelKeyGitHubOrganization: githubConfig.Organization, + LabelKeyGitHubRepository: githubConfig.Repository, + LabelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), }, }, Spec: v1alpha1.AutoscalingListenerSpec{ @@ -315,11 +374,30 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1. 
} func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner { + labels := make(map[string]string) + for _, key := range commonLabelKeys { + switch key { + case LabelKeyKubernetesComponent: + labels[key] = "runner" + default: + v, ok := ephemeralRunnerSet.Labels[key] + if !ok { + continue + } + labels[key] = v + } + } + annotations := make(map[string]string) + for key, val := range ephemeralRunnerSet.Annotations { + annotations[key] = val + } return &v1alpha1.EphemeralRunner{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ GenerateName: ephemeralRunnerSet.Name + "-runner-", Namespace: ephemeralRunnerSet.Namespace, + Labels: labels, + Annotations: annotations, }, Spec: ephemeralRunnerSet.Spec.EphemeralRunnerSpec, } @@ -337,6 +415,7 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a for k, v := range runner.Spec.PodTemplateSpec.Labels { labels[k] = v } + labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue) for k, v := range runner.ObjectMeta.Annotations { annotations[k] = v @@ -352,8 +431,6 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a runner.Status.RunnerJITConfig, ) - labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue) - objectMeta := metav1.ObjectMeta{ Name: runner.ObjectMeta.Name, Namespace: runner.ObjectMeta.Namespace, @@ -469,3 +546,22 @@ func rulesForListenerRole(resourceNames []string) []rbacv1.PolicyRule { }, } } + +func applyGitHubURLLabels(url string, labels map[string]string) error { + githubConfig, err := actions.ParseGitHubConfigFromURL(url) + if err != nil { + return fmt.Errorf("failed to parse github config from url: %v", err) + } + + if len(githubConfig.Enterprise) > 0 { + labels[LabelKeyGitHubEnterprise] = githubConfig.Enterprise + } + if len(githubConfig.Organization) > 0 { + labels[LabelKeyGitHubOrganization] = githubConfig.Organization + } + if len(githubConfig.Repository) > 0 { 
+ labels[LabelKeyGitHubRepository] = githubConfig.Repository + } + + return nil +} diff --git a/controllers/actions.github.com/resourcebuilder_test.go b/controllers/actions.github.com/resourcebuilder_test.go new file mode 100644 index 0000000000..e41d798006 --- /dev/null +++ b/controllers/actions.github.com/resourcebuilder_test.go @@ -0,0 +1,93 @@ +package actionsgithubcom + +import ( + "context" + "testing" + + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestLabelPropagation(t *testing.T) { + autoscalingRunnerSet := v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-scale-set", + Namespace: "test-ns", + Labels: map[string]string{ + LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, + LabelKeyKubernetesVersion: "0.2.0", + }, + Annotations: map[string]string{ + runnerScaleSetIdAnnotationKey: "1", + AnnotationKeyGitHubRunnerGroupName: "test-group", + }, + }, + Spec: v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/org/repo", + }, + } + + var b resourceBuilder + ephemeralRunnerSet, err := b.newEphemeralRunnerSet(&autoscalingRunnerSet) + require.NoError(t, err) + assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf]) + assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent]) + assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion]) + assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash]) + assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName]) + assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace]) + assert.Equal(t, "", 
ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise]) + assert.Equal(t, "org", ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization]) + assert.Equal(t, "repo", ephemeralRunnerSet.Labels[LabelKeyGitHubRepository]) + assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName]) + + listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil) + require.NoError(t, err) + assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf]) + assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent]) + assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion]) + assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash]) + assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName]) + assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace]) + assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise]) + assert.Equal(t, "org", listener.Labels[LabelKeyGitHubOrganization]) + assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository]) + + listenerServiceAccount := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + } + listenerSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + } + listenerPod := b.newScaleSetListenerPod(listener, listenerServiceAccount, listenerSecret) + assert.Equal(t, listenerPod.Labels, listener.Labels) + + ephemeralRunner := b.newEphemeralRunner(ephemeralRunnerSet) + require.NoError(t, err) + + for _, key := range commonLabelKeys { + if key == LabelKeyKubernetesComponent { + continue + } + assert.Equal(t, ephemeralRunnerSet.Labels[key], ephemeralRunner.Labels[key]) + } + assert.Equal(t, "runner", 
ephemeralRunner.Labels[LabelKeyKubernetesComponent]) + assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunner.Annotations[AnnotationKeyGitHubRunnerGroupName]) + + runnerSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + } + pod := b.newEphemeralRunnerPod(context.TODO(), ephemeralRunner, runnerSecret) + for key := range ephemeralRunner.Labels { + assert.Equal(t, ephemeralRunner.Labels[key], pod.Labels[key]) + } +} diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index 31ed62cd1f..3c6f5e5099 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -160,7 +160,7 @@ kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-runner-scale-set-co ```bash # Runner set listener logs -kubectl logs -n "${NAMESPACE}" -l auto-scaling-runner-set-namespace=arc-systems -l auto-scaling-runner-set-name=arc-runner-set +kubectl logs -n "${NAMESPACE}" -l actions.github.com/scale-set-namespace=arc-systems -l actions.github.com/scale-set-name=arc-runner-set ``` ### Naming error: `Name must have up to characters` From b99c45aa98d04f348a032c94eb20c6a24473eed0 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 27 Mar 2023 16:38:27 +0200 Subject: [PATCH 156/561] Housekeeping: move `adrs/` to `docs/` and update status (#2443) Co-authored-by: Francesco Renzi --- .../adrs}/2022-10-17-runner-image.md | 0 .../2022-10-27-runnerscaleset-lifetime.md | 0 .../adrs}/2022-11-04-crd-api-group-name.md | 0 .../2022-12-05-adding-labels-k8s-resources.md | 0 ...-27-pick-the-right-runner-to-scale-down.md | 0 .../2023-02-02-automate-runner-updates.md | 2 +- ...023-02-10-limit-manager-role-permission.md | 2 +- docs/adrs/2023-03-17-workflow-improvements.md | 84 +++++++++++++++++++ .../2023-04-14-adding-labels-k8s-resources.md | 2 +- {adrs => 
docs/adrs}/yyyy-mm-dd-TEMPLATE.md | 0 10 files changed, 87 insertions(+), 3 deletions(-) rename {adrs => docs/adrs}/2022-10-17-runner-image.md (100%) rename {adrs => docs/adrs}/2022-10-27-runnerscaleset-lifetime.md (100%) rename {adrs => docs/adrs}/2022-11-04-crd-api-group-name.md (100%) rename {adrs => docs/adrs}/2022-12-05-adding-labels-k8s-resources.md (100%) rename {adrs => docs/adrs}/2022-12-27-pick-the-right-runner-to-scale-down.md (100%) rename {adrs => docs/adrs}/2023-02-02-automate-runner-updates.md (98%) rename {adrs => docs/adrs}/2023-02-10-limit-manager-role-permission.md (99%) create mode 100644 docs/adrs/2023-03-17-workflow-improvements.md rename {adrs => docs/adrs}/2023-04-14-adding-labels-k8s-resources.md (97%) rename {adrs => docs/adrs}/yyyy-mm-dd-TEMPLATE.md (100%) diff --git a/adrs/2022-10-17-runner-image.md b/docs/adrs/2022-10-17-runner-image.md similarity index 100% rename from adrs/2022-10-17-runner-image.md rename to docs/adrs/2022-10-17-runner-image.md diff --git a/adrs/2022-10-27-runnerscaleset-lifetime.md b/docs/adrs/2022-10-27-runnerscaleset-lifetime.md similarity index 100% rename from adrs/2022-10-27-runnerscaleset-lifetime.md rename to docs/adrs/2022-10-27-runnerscaleset-lifetime.md diff --git a/adrs/2022-11-04-crd-api-group-name.md b/docs/adrs/2022-11-04-crd-api-group-name.md similarity index 100% rename from adrs/2022-11-04-crd-api-group-name.md rename to docs/adrs/2022-11-04-crd-api-group-name.md diff --git a/adrs/2022-12-05-adding-labels-k8s-resources.md b/docs/adrs/2022-12-05-adding-labels-k8s-resources.md similarity index 100% rename from adrs/2022-12-05-adding-labels-k8s-resources.md rename to docs/adrs/2022-12-05-adding-labels-k8s-resources.md diff --git a/adrs/2022-12-27-pick-the-right-runner-to-scale-down.md b/docs/adrs/2022-12-27-pick-the-right-runner-to-scale-down.md similarity index 100% rename from adrs/2022-12-27-pick-the-right-runner-to-scale-down.md rename to docs/adrs/2022-12-27-pick-the-right-runner-to-scale-down.md 
diff --git a/adrs/2023-02-02-automate-runner-updates.md b/docs/adrs/2023-02-02-automate-runner-updates.md similarity index 98% rename from adrs/2023-02-02-automate-runner-updates.md rename to docs/adrs/2023-02-02-automate-runner-updates.md index 393e78996c..c3bb5c4df4 100644 --- a/adrs/2023-02-02-automate-runner-updates.md +++ b/docs/adrs/2023-02-02-automate-runner-updates.md @@ -2,7 +2,7 @@ **Date**: 2023-02-02 -**Status**: Proposed +**Status**: Done ## Context diff --git a/adrs/2023-02-10-limit-manager-role-permission.md b/docs/adrs/2023-02-10-limit-manager-role-permission.md similarity index 99% rename from adrs/2023-02-10-limit-manager-role-permission.md rename to docs/adrs/2023-02-10-limit-manager-role-permission.md index 803a4ec967..d327b4f874 100644 --- a/adrs/2023-02-10-limit-manager-role-permission.md +++ b/docs/adrs/2023-02-10-limit-manager-role-permission.md @@ -2,7 +2,7 @@ **Date**: 2023-02-10 -**Status**: Pending +**Status**: Done ## Context diff --git a/docs/adrs/2023-03-17-workflow-improvements.md b/docs/adrs/2023-03-17-workflow-improvements.md new file mode 100644 index 0000000000..38d611aa24 --- /dev/null +++ b/docs/adrs/2023-03-17-workflow-improvements.md @@ -0,0 +1,84 @@ +# Improve ARC workflows for autoscaling runner sets + +**Date**: 2023-03-17 + +**Status**: Done + +## Context + +In the [actions-runner-controller](https://github.com/actions/actions-runner-controller) +repository we essentially have two projects living side by side: the "legacy" +actions-runner-controller and the new one GitHub is supporting +(gha-runner-scale-set). To hasten progress we relied on existing workflows and +added some of our own (e.g.: end-to-end tests). We now got to a point where it's +sort of confusing what does what and why, not to mention the increased running +times of some those workflows and some GHA-related flaky tests getting in the +way of legacy ARC and viceversa. 
The three main areas we want to cover are: Go +code, Kubernetes manifests / Helm charts and E2E tests. + +## Go code + +At the moment we have three workflows that validate Go code: + +- [golangci-lint](https://github.com/actions/actions-runner-controller/blob/34f3878/.github/workflows/golangci-lint.yaml): + this is a collection of linters that currently runs on all PRs and push to + master +- [Validate ARC](https://github.com/actions/actions-runner-controller/blob/01e9dd3/.github/workflows/validate-arc.yaml): + this is a bit of a catch-all workflow, other than Go tests this also validates + Kubernetes manifests, runs `go generate`, `go fmt` and `go vet` +- [Run CodeQL](https://github.com/actions/actions-runner-controller/blob/a095f0b66aad5fbc8aa8d7032f3299233e4c84d2/.github/workflows/run-codeql.yaml) + +### Proposal + +I think having one `Go` workflow that collects everything-Go would help a ton with +reliability and understandability of what's going on. This shouldn't be limited +to the GHA-supported mode as there are changes that even if made outside the GHA +code base could affect us (such as a dependency update). +This workflow should only run on changes to `*.go` files, `go.mod` and `go.sum`. +It should have these jobs, aiming to cover all existing functionality and +eliminate some duplication: + +- `test`: run all Go tests in the project. We currently use the `-short` and + `-coverprofile` flags: while `-short` is used to skip [old ARC E2E + tests](https://github.com/actions/actions-runner-controller/blob/master/test/e2e/e2e_test.go#L85-L87), + `-coverprofile` is adding to the test time without really giving us any value + in return. We should also start using `actions/setup-go@v4` to take advantage + of caching (it would speed up our tests by a lot) or enable it on `v3` if we + have a strong reason not to upgrade. 
We should keep ignoring our E2E tests too + as those will be run elsewhere (either use `Short` there too or ignoring the + package like we currently do). As a dependency for tests this needs to run + `make manifests` first: we should fail there and then if there is a diff. +- `fmt`: we currently run `go fmt ./...` as part of `Validate ARC` but do + nothing with the results. We should fail in case of a diff. We don't need + caching for this job. +- `lint`: this corresponds to what's currently the `golanci-lint` workflow (this + also covers `go vet` which currently happens as part of `Validate ARC too`) +- `generate`: the current behaviour for this is actually quite risky, we + generate our code in `Validate ARC` workflow and use the results to run the + tests but we don't validate that up to date generate code is checked in. This + job should run `go generate` and fail on a diff. +- `vulncheck`: **EDIT: this is covered by CodeQL** the Go team is maintaining [`govulncheck`](https://go.dev/blog/vuln), a tool to recursively + analyzing all function calls in Go code and spot vulnerabilities on the call + stack. + +## Kubernetes manifests / Helm charts + +We have [recently separated](https://github.com/actions/actions-runner-controller/commit/bd9f32e3540663360cf47f04acad26e6010f772e) +Helm chart validation and we validate up-to-dateness of manifests as part of `Go +/ test`. + +## End to end tests + +These tests are giving us really good coverage and should be one of the main +actors when it comes to trusting our releases. Two improvements that could be +done here are: + +- renaming the workflow to `GHA E2E`: since renaming our resources the `gha` + prefix has been used to identify things related to the mode GitHub supports + and these jobs strictly validate the GitHub mode _only_. Having a shorter name + allows for more readability of the various scenarios (e.g. `GHA E2E / + single-namespace-setup`). 
+- the test currently monitors and validates the number of pods spawning during + the workflow but not the outcome of the workflow. While not necessary to look + at pods specifics, we should at least guarantee that the workflow can + successfully conclude. diff --git a/adrs/2023-04-14-adding-labels-k8s-resources.md b/docs/adrs/2023-04-14-adding-labels-k8s-resources.md similarity index 97% rename from adrs/2023-04-14-adding-labels-k8s-resources.md rename to docs/adrs/2023-04-14-adding-labels-k8s-resources.md index 737e0653c8..6a2eb1843b 100644 --- a/adrs/2023-04-14-adding-labels-k8s-resources.md +++ b/docs/adrs/2023-04-14-adding-labels-k8s-resources.md @@ -86,4 +86,4 @@ Or for example if they're having problems specifically with runners: This way users don't have to understand ARC moving parts but we still have a way to target them specifically if we need to. -[^1]: [ADR 2022-12-05](2022-12-05-adding-labels-k8s-resources.md) +[^1]: Supersedes [ADR 2022-12-05](2022-12-05-adding-labels-k8s-resources.md) \ No newline at end of file diff --git a/adrs/yyyy-mm-dd-TEMPLATE.md b/docs/adrs/yyyy-mm-dd-TEMPLATE.md similarity index 100% rename from adrs/yyyy-mm-dd-TEMPLATE.md rename to docs/adrs/yyyy-mm-dd-TEMPLATE.md From 36efd2dc9df744c04cedbac5576e63f7a525f577 Mon Sep 17 00:00:00 2001 From: Zane Hala Date: Mon, 27 Mar 2023 18:42:20 -0500 Subject: [PATCH 157/561] chart: Allow customization of admission webhook timeout (#2398) Co-authored-by: Yusuke Kuoka --- acceptance/deploy.sh | 3 +++ .../templates/webhook_configs.yaml | 7 +++++++ test/e2e/e2e_test.go | 3 +++ 3 files changed, 13 insertions(+) diff --git a/acceptance/deploy.sh b/acceptance/deploy.sh index 2b076010ca..3435086af5 100755 --- a/acceptance/deploy.sh +++ b/acceptance/deploy.sh @@ -69,6 +69,9 @@ if [ "${tool}" == "helm" ]; then flags+=( --set githubWebhookServer.logFormat=${LOG_FORMAT}) flags+=( --set actionsMetricsServer.logFormat=${LOG_FORMAT}) fi + if [ "${ADMISSION_WEBHOOKS_TIMEOUT}" != "" ]; then + flags+=( 
--set admissionWebHooks.timeoutSeconds=${ADMISSION_WEBHOOKS_TIMEOUT}) + fi if [ -n "${CREATE_SECRETS_USING_HELM}" ]; then if [ -z "${WEBHOOK_GITHUB_TOKEN}" ]; then echo 'Failed deploying secret "actions-metrics-server" using helm. Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2 diff --git a/charts/actions-runner-controller/templates/webhook_configs.yaml b/charts/actions-runner-controller/templates/webhook_configs.yaml index 7a71735d68..89cb273621 100644 --- a/charts/actions-runner-controller/templates/webhook_configs.yaml +++ b/charts/actions-runner-controller/templates/webhook_configs.yaml @@ -44,6 +44,7 @@ webhooks: resources: - runners sideEffects: None + timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}} - admissionReviewVersions: - v1beta1 {{- if .Values.scope.singleNamespace }} @@ -74,6 +75,7 @@ webhooks: resources: - runnerdeployments sideEffects: None + timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}} - admissionReviewVersions: - v1beta1 {{- if .Values.scope.singleNamespace }} @@ -104,6 +106,7 @@ webhooks: resources: - runnerreplicasets sideEffects: None + timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}} - admissionReviewVersions: - v1beta1 {{- if .Values.scope.singleNamespace }} @@ -136,6 +139,7 @@ webhooks: objectSelector: matchLabels: "actions-runner-controller/inject-registration-token": "true" + timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}} --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration @@ -177,6 +181,7 @@ webhooks: resources: - runners sideEffects: None + timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}} - admissionReviewVersions: - v1beta1 {{- if .Values.scope.singleNamespace }} @@ -207,6 +212,7 @@ webhooks: resources: - runnerdeployments sideEffects: None + timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}} - admissionReviewVersions: - v1beta1 {{- if 
.Values.scope.singleNamespace }} @@ -238,6 +244,7 @@ webhooks: - runnerreplicasets sideEffects: None {{ if not (or (hasKey .Values.admissionWebHooks "caBundle") .Values.certManagerEnabled) }} + timeoutSeconds: {{ .Values.admissionWebHooks.timeoutSeconds | default 10}} --- apiVersion: v1 kind: Secret diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 66ac1c0393..c73699787b 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -413,6 +413,7 @@ type env struct { runnerNamespace string logFormat string remoteKubeconfig string + admissionWebhooksTimeout string imagePullSecretName string imagePullPolicy string @@ -547,6 +548,7 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env { e.runnerNamespace = testing.Getenv(t, "TEST_RUNNER_NAMESPACE", "default") e.logFormat = testing.Getenv(t, "ARC_E2E_LOG_FORMAT", "") e.remoteKubeconfig = testing.Getenv(t, "ARC_E2E_REMOTE_KUBECONFIG", "") + e.admissionWebhooksTimeout = testing.Getenv(t, "ARC_E2E_ADMISSION_WEBHOOKS_TIMEOUT", "") e.imagePullSecretName = testing.Getenv(t, "ARC_E2E_IMAGE_PULL_SECRET_NAME", "") e.vars = vars @@ -724,6 +726,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, ch "TEST_ID=" + testID, "NAME=" + repo, "VERSION=" + tag, + "ADMISSION_WEBHOOKS_TIMEOUT=" + e.admissionWebhooksTimeout, "IMAGE_PULL_SECRET=" + e.imagePullSecretName, "IMAGE_PULL_POLICY=" + e.imagePullPolicy, } From 220fa9d6d40f01c5f038d5f56eb49b8137698d0d Mon Sep 17 00:00:00 2001 From: cskinfill Date: Mon, 27 Mar 2023 19:43:01 -0400 Subject: [PATCH 158/561] crd: Add enterprise, organization, repository, and runner labels to runnerdeployments print columns (#2310) Co-authored-by: Yusuke Kuoka --- .../v1alpha1/runnerdeployment_types.go | 5 +++++ .../actions.summerwind.dev_runnerdeployments.yaml | 15 +++++++++++++++ .../actions.summerwind.dev_runnerdeployments.yaml | 15 +++++++++++++++ 3 files changed, 35 insertions(+) diff --git 
a/apis/actions.summerwind.net/v1alpha1/runnerdeployment_types.go b/apis/actions.summerwind.net/v1alpha1/runnerdeployment_types.go index 693dbaec7e..eabd2bb41f 100644 --- a/apis/actions.summerwind.net/v1alpha1/runnerdeployment_types.go +++ b/apis/actions.summerwind.net/v1alpha1/runnerdeployment_types.go @@ -77,6 +77,11 @@ type RunnerDeploymentStatus struct { // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=rdeploy // +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.enterprise",name=Enterprise,type=string +// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.organization",name=Organization,type=string +// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.repository",name=Repository,type=string +// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.group",name=Group,type=string +// +kubebuilder:printcolumn:JSONPath=".spec.template.spec.labels",name=Labels,type=string // +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number // +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number // +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml index f3254b9876..cb5f54ae55 100644 --- a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml @@ -17,6 +17,21 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: + - jsonPath: .spec.template.spec.enterprise + name: Enterprise + type: string + - jsonPath: .spec.template.spec.organization + name: Organization + type: string + - jsonPath: .spec.template.spec.repository + name: Repository + type: string + - jsonPath: .spec.template.spec.group + name: Group + type: string + - 
jsonPath: .spec.template.spec.labels + name: Labels + type: string - jsonPath: .spec.replicas name: Desired type: number diff --git a/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml b/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml index f3254b9876..cb5f54ae55 100644 --- a/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml +++ b/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml @@ -17,6 +17,21 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: + - jsonPath: .spec.template.spec.enterprise + name: Enterprise + type: string + - jsonPath: .spec.template.spec.organization + name: Organization + type: string + - jsonPath: .spec.template.spec.repository + name: Repository + type: string + - jsonPath: .spec.template.spec.group + name: Group + type: string + - jsonPath: .spec.template.spec.labels + name: Labels + type: string - jsonPath: .spec.replicas name: Desired type: number From 1dfd9473a2d81bcbeb18aa74448cb1a9baa5c479 Mon Sep 17 00:00:00 2001 From: Waldek Herka <87032474+wherka-ama@users.noreply.github.com> Date: Tue, 28 Mar 2023 01:43:33 +0200 Subject: [PATCH 159/561] chart: Restricting the RBAC rules on secrets (#2265) Co-authored-by: Waldek Herka Co-authored-by: Yusuke Kuoka --- acceptance/deploy.sh | 3 +++ .../templates/manager_role.yaml | 15 ------------ .../manager_role_binding_secrets.yaml | 21 ++++++++++++++++ .../templates/manager_role_secrets.yaml | 24 +++++++++++++++++++ test/e2e/e2e_test.go | 4 ++++ 5 files changed, 52 insertions(+), 15 deletions(-) create mode 100644 charts/actions-runner-controller/templates/manager_role_binding_secrets.yaml create mode 100644 charts/actions-runner-controller/templates/manager_role_secrets.yaml diff --git a/acceptance/deploy.sh b/acceptance/deploy.sh index 3435086af5..c5bb786214 100755 --- a/acceptance/deploy.sh +++ b/acceptance/deploy.sh @@ -61,6 +61,9 @@ if [ "${tool}" == "helm" ]; then flags+=( --set 
githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET}) flags+=( --set actionsMetricsServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET}) fi + if [ "${WATCH_NAMESPACE}" != "" ]; then + flags+=( --set watchNamespace=${WATCH_NAMESPACE} --set singleNamespace=true) + fi if [ "${CHART_VERSION}" != "" ]; then flags+=( --version ${CHART_VERSION}) fi diff --git a/charts/actions-runner-controller/templates/manager_role.yaml b/charts/actions-runner-controller/templates/manager_role.yaml index cd0a374f37..bd213909eb 100644 --- a/charts/actions-runner-controller/templates/manager_role.yaml +++ b/charts/actions-runner-controller/templates/manager_role.yaml @@ -250,14 +250,6 @@ rules: - patch - update - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch {{- if .Values.runner.statusUpdateHook.enabled }} - apiGroups: - "" @@ -311,11 +303,4 @@ rules: - list - create - delete -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete {{- end }} diff --git a/charts/actions-runner-controller/templates/manager_role_binding_secrets.yaml b/charts/actions-runner-controller/templates/manager_role_binding_secrets.yaml new file mode 100644 index 0000000000..9b7132cf85 --- /dev/null +++ b/charts/actions-runner-controller/templates/manager_role_binding_secrets.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +{{- if .Values.scope.singleNamespace }} +kind: RoleBinding +{{- else }} +kind: ClusterRoleBinding +{{- end }} +metadata: + name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + {{- if .Values.scope.singleNamespace }} + kind: Role + {{- else }} + kind: ClusterRole + {{- end }} + name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets +subjects: +- kind: ServiceAccount + name: {{ include "actions-runner-controller.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} diff --git a/charts/actions-runner-controller/templates/manager_role_secrets.yaml b/charts/actions-runner-controller/templates/manager_role_secrets.yaml new file mode 100644 index 0000000000..38037c833b --- /dev/null +++ b/charts/actions-runner-controller/templates/manager_role_secrets.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +{{- if .Values.scope.singleNamespace }} +kind: Role +{{- else }} +kind: ClusterRole +{{- end }} +metadata: + creationTimestamp: null + name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +{{- if .Values.rbac.allowGrantingKubernetesContainerModePermissions }} +{{/* These permissions are required by ARC to create RBAC resources for the runner pod to use the kubernetes container mode. */}} +{{/* See https://github.com/actions/actions-runner-controller/pull/1268/files#r917331632 */}} + - create + - delete +{{- end }} \ No newline at end of file diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index c73699787b..c8c21270db 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -416,6 +416,7 @@ type env struct { admissionWebhooksTimeout string imagePullSecretName string imagePullPolicy string + watchNamespace string vars vars VerifyTimeout time.Duration @@ -558,6 +559,8 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env { e.imagePullPolicy = "IfNotPresent" } + e.watchNamespace = testing.Getenv(t, "TEST_WATCH_NAMESPACE", "") + if e.remoteKubeconfig == "" { e.Kind = testing.StartKind(t, k8sMinorVer, testing.Preload(images...)) e.Env.Kubeconfig = e.Kind.Kubeconfig() @@ -729,6 +732,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, ch "ADMISSION_WEBHOOKS_TIMEOUT=" + e.admissionWebhooksTimeout, "IMAGE_PULL_SECRET=" + e.imagePullSecretName, "IMAGE_PULL_POLICY=" + e.imagePullPolicy, + "WATCH_NAMESPACE=" + 
e.watchNamespace, } if e.useApp { From 110737157b153deaa9f9e69501303efbaba68a21 Mon Sep 17 00:00:00 2001 From: Jonathan Wiemers Date: Tue, 28 Mar 2023 04:18:07 +0200 Subject: [PATCH 160/561] chart: Allow webhook server env to be set individually (#2377) Co-authored-by: Yusuke Kuoka --- acceptance/deploy.sh | 4 +++ .../templates/githubwebhook.deployment.yaml | 4 +++ charts/actions-runner-controller/values.yaml | 13 +++++++++ test/e2e/e2e_test.go | 29 +++++++++++++++++-- 4 files changed, 47 insertions(+), 3 deletions(-) diff --git a/acceptance/deploy.sh b/acceptance/deploy.sh index c5bb786214..eaa5518786 100755 --- a/acceptance/deploy.sh +++ b/acceptance/deploy.sh @@ -83,6 +83,10 @@ if [ "${tool}" == "helm" ]; then flags+=( --set actionsMetricsServer.secret.create=true) flags+=( --set actionsMetricsServer.secret.github_token=${WEBHOOK_GITHUB_TOKEN}) fi + if [ -n "${GITHUB_WEBHOOK_SERVER_ENV_NAME}" ] && [ -n "${GITHUB_WEBHOOK_SERVER_ENV_VALUE}" ]; then + flags+=( --set githubWebhookServer.env[0].name=${GITHUB_WEBHOOK_SERVER_ENV_NAME}) + flags+=( --set githubWebhookServer.env[0].value=${GITHUB_WEBHOOK_SERVER_ENV_VALUE}) + fi set -vx diff --git a/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml b/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml index b119ff1ddf..d778cba876 100644 --- a/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml +++ b/charts/actions-runner-controller/templates/githubwebhook.deployment.yaml @@ -117,10 +117,14 @@ spec: name: {{ include "actions-runner-controller.secretName" . 
}} optional: true {{- end }} + {{- if kindIs "slice" .Values.githubWebhookServer.env }} + {{- toYaml .Values.githubWebhookServer.env | nindent 8 }} + {{- else }} {{- range $key, $val := .Values.githubWebhookServer.env }} - name: {{ $key }} value: {{ $val | quote }} {{- end }} + {{- end }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}" name: github-webhook-server imagePullPolicy: {{ .Values.image.pullPolicy }} diff --git a/charts/actions-runner-controller/values.yaml b/charts/actions-runner-controller/values.yaml index 91dec2bee9..b5ea7f077c 100644 --- a/charts/actions-runner-controller/values.yaml +++ b/charts/actions-runner-controller/values.yaml @@ -279,6 +279,19 @@ githubWebhookServer: # queueLimit: 100 terminationGracePeriodSeconds: 10 lifecycle: {} + # specify additional environment variables for the webhook server pod. + # It's possible to specify either key vale pairs e.g.: + # my_env_var: "some value" + # my_other_env_var: "other value" + + # or a list of complete environment variable definitions e.g.: + # - name: GITHUB_WEBHOOK_SECRET_TOKEN + # valueFrom: + # secretKeyRef: + # key: GITHUB_WEBHOOK_SECRET_TOKEN + # name: prod-gha-controller-webhook-token + # optional: true + env: {} actionsMetrics: serviceAnnotations: {} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index c8c21270db..48e5fc2dfa 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -101,6 +101,7 @@ func TestE2E(t *testing.T) { label string controller, controllerVer string chart, chartVer string + opt []InstallARCOption }{ { label: "stable", @@ -117,6 +118,12 @@ func TestE2E(t *testing.T) { controllerVer: vars.controllerImageTag, chart: "", chartVer: "", + opt: []InstallARCOption{ + func(ia *InstallARCConfig) { + ia.GithubWebhookServerEnvName = "FOO" + ia.GithubWebhookServerEnvValue = "foo" + }, + }, }, } @@ -186,7 +193,7 @@ func TestE2E(t *testing.T) { for i, v := range testedVersions { t.Run("install 
actions-runner-controller "+v.label, func(t *testing.T) { t.Logf("Using controller %s:%s and chart %s:%s", v.controller, v.controllerVer, v.chart, v.chartVer) - env.installActionsRunnerController(t, v.controller, v.controllerVer, testID, v.chart, v.chartVer) + env.installActionsRunnerController(t, v.controller, v.controllerVer, testID, v.chart, v.chartVer, v.opt...) }) if t.Failed() { @@ -300,7 +307,7 @@ func TestE2E(t *testing.T) { for i, v := range testedVersions { t.Run("install actions-runner-controller "+v.label, func(t *testing.T) { t.Logf("Using controller %s:%s and chart %s:%s", v.controller, v.controllerVer, v.chart, v.chartVer) - env.installActionsRunnerController(t, v.controller, v.controllerVer, testID, v.chart, v.chartVer) + env.installActionsRunnerController(t, v.controller, v.controllerVer, testID, v.chart, v.chartVer, v.opt...) }) if t.Failed() { @@ -711,9 +718,20 @@ func (e *env) installCertManager(t *testing.T) { e.KubectlWaitUntilDeployAvailable(t, "cert-manager", waitCfg.WithTimeout(60*time.Second)) } -func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, chart, chartVer string) { +type InstallARCConfig struct { + GithubWebhookServerEnvName, GithubWebhookServerEnvValue string +} + +type InstallARCOption func(*InstallARCConfig) + +func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, chart, chartVer string, opts ...InstallARCOption) { t.Helper() + var c InstallARCConfig + for _, opt := range opts { + opt(&c) + } + e.createControllerNamespaceAndServiceAccount(t) scriptEnv := []string{ @@ -755,6 +773,11 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, ch ) } + varEnv = append(varEnv, + "GITHUB_WEBHOOK_SERVER_ENV_NAME="+c.GithubWebhookServerEnvName, + "GITHUB_WEBHOOK_SERVER_ENV_VALUE="+c.GithubWebhookServerEnvValue, + ) + scriptEnv = append(scriptEnv, varEnv...) scriptEnv = append(scriptEnv, e.vars.commonScriptEnv...) 
From dc7fc7fde35e790d17512ca9318b26e57ad2cf2a Mon Sep 17 00:00:00 2001 From: Milas Bowman Date: Mon, 27 Mar 2023 22:29:16 -0400 Subject: [PATCH 161/561] runner: Use Docker socket via shared emptyDir instead of TCP/mTLS (#2324) Co-authored-by: Yusuke Kuoka --- .../new_runner_pod_test.go | 116 ++++++++++-------- .../runner_controller.go | 103 ++++++++++------ test/e2e/e2e_test.go | 19 ++- 3 files changed, 143 insertions(+), 95 deletions(-) diff --git a/controllers/actions.summerwind.net/new_runner_pod_test.go b/controllers/actions.summerwind.net/new_runner_pod_test.go index 4d3b419d74..fb9b6653a8 100644 --- a/controllers/actions.summerwind.net/new_runner_pod_test.go +++ b/controllers/actions.summerwind.net/new_runner_pod_test.go @@ -76,9 +76,12 @@ func TestNewRunnerPod(t *testing.T) { }, }, { - Name: "certs-client", + Name: "docker-sock", VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: resource.NewScaledQuantity(1, resource.Mega), + }, }, }, }, @@ -137,15 +140,7 @@ func TestNewRunnerPod(t *testing.T) { }, { Name: "DOCKER_HOST", - Value: "tcp://localhost:2376", - }, - { - Name: "DOCKER_TLS_VERIFY", - Value: "1", - }, - { - Name: "DOCKER_CERT_PATH", - Value: "/certs/client", + Value: "unix:///run/docker/docker.sock", }, }, VolumeMounts: []corev1.VolumeMount{ @@ -158,9 +153,8 @@ func TestNewRunnerPod(t *testing.T) { MountPath: "/runner/_work", }, { - Name: "certs-client", - MountPath: "/certs/client", - ReadOnly: true, + Name: "docker-sock", + MountPath: "/run/docker", }, }, ImagePullPolicy: corev1.PullAlways, @@ -169,10 +163,15 @@ func TestNewRunnerPod(t *testing.T) { { Name: "docker", Image: "default-docker-image", + Args: []string{ + "dockerd", + "--host=unix:///run/docker/docker.sock", + "--group=$(DOCKER_GROUP_GID)", + }, Env: []corev1.EnvVar{ { - Name: "DOCKER_TLS_CERTDIR", - Value: "/certs", + Name: "DOCKER_GROUP_GID", + Value: "121", }, }, 
VolumeMounts: []corev1.VolumeMount{ @@ -181,8 +180,8 @@ func TestNewRunnerPod(t *testing.T) { MountPath: "/runner", }, { - Name: "certs-client", - MountPath: "/certs/client", + Name: "docker-sock", + MountPath: "/run/docker", }, { Name: "work", @@ -485,9 +484,12 @@ func TestNewRunnerPod(t *testing.T) { }, }, { - Name: "certs-client", + Name: "docker-sock", VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: resource.NewScaledQuantity(1, resource.Mega), + }, }, }, } @@ -501,9 +503,8 @@ func TestNewRunnerPod(t *testing.T) { MountPath: "/runner", }, { - Name: "certs-client", - MountPath: "/certs/client", - ReadOnly: true, + Name: "docker-sock", + MountPath: "/run/docker", }, } }), @@ -527,9 +528,12 @@ func TestNewRunnerPod(t *testing.T) { }, }, { - Name: "certs-client", + Name: "docker-sock", VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: resource.NewScaledQuantity(1, resource.Mega), + }, }, }, } @@ -606,9 +610,12 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { }, }, { - Name: "certs-client", + Name: "docker-sock", VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: resource.NewScaledQuantity(1, resource.Mega), + }, }, }, }, @@ -667,15 +674,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { }, { Name: "DOCKER_HOST", - Value: "tcp://localhost:2376", - }, - { - Name: "DOCKER_TLS_VERIFY", - Value: "1", - }, - { - Name: "DOCKER_CERT_PATH", - Value: "/certs/client", + Value: "unix:///run/docker/docker.sock", }, { Name: "RUNNER_NAME", @@ -696,9 +695,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { MountPath: "/runner/_work", }, { - Name: "certs-client", - MountPath: 
"/certs/client", - ReadOnly: true, + Name: "docker-sock", + MountPath: "/run/docker", }, }, ImagePullPolicy: corev1.PullAlways, @@ -707,10 +705,15 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { { Name: "docker", Image: "default-docker-image", + Args: []string{ + "dockerd", + "--host=unix:///run/docker/docker.sock", + "--group=$(DOCKER_GROUP_GID)", + }, Env: []corev1.EnvVar{ { - Name: "DOCKER_TLS_CERTDIR", - Value: "/certs", + Name: "DOCKER_GROUP_GID", + Value: "121", }, }, VolumeMounts: []corev1.VolumeMount{ @@ -719,8 +722,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { MountPath: "/runner", }, { - Name: "certs-client", - MountPath: "/certs/client", + Name: "docker-sock", + MountPath: "/run/docker", }, { Name: "work", @@ -1079,6 +1082,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { Name: "work", MountPath: "/runner/_work", }, + { + Name: "docker-sock", + MountPath: "/run/docker", + }, }, }, }, @@ -1097,9 +1104,12 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { }, }, { - Name: "certs-client", + Name: "docker-sock", VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: resource.NewScaledQuantity(1, resource.Mega), + }, }, }, workGenericEphemeralVolume, @@ -1110,13 +1120,12 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { MountPath: "/runner/_work", }, { - Name: "runner", - MountPath: "/runner", + Name: "docker-sock", + MountPath: "/run/docker", }, { - Name: "certs-client", - MountPath: "/certs/client", - ReadOnly: true, + Name: "runner", + MountPath: "/runner", }, } }), @@ -1144,9 +1153,12 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { }, }, { - Name: "certs-client", + Name: "docker-sock", VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: 
resource.NewScaledQuantity(1, resource.Mega), + }, }, }, workGenericEphemeralVolume, diff --git a/controllers/actions.summerwind.net/runner_controller.go b/controllers/actions.summerwind.net/runner_controller.go index c208d1a225..4fa00968fd 100644 --- a/controllers/actions.summerwind.net/runner_controller.go +++ b/controllers/actions.summerwind.net/runner_controller.go @@ -30,6 +30,7 @@ import ( "github.com/go-logr/logr" kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" @@ -1001,6 +1002,35 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru ) } + // explicitly invoke `dockerd` to avoid automatic TLS / TCP binding + dockerdContainer.Args = append([]string{ + "dockerd", + "--host=unix:///run/docker/docker.sock", + }, dockerdContainer.Args...) + + // this must match a GID for the user in the runner image + // default matches GitHub Actions infra (and default runner images + // for actions-runner-controller) so typically should not need to be + // overridden + if ok, _ := envVarPresent("DOCKER_GROUP_GID", dockerdContainer.Env); !ok { + dockerdContainer.Env = append(dockerdContainer.Env, + corev1.EnvVar{ + Name: "DOCKER_GROUP_GID", + Value: "121", + }) + } + dockerdContainer.Args = append(dockerdContainer.Args, "--group=$(DOCKER_GROUP_GID)") + + // ideally, we could mount the socket directly at `/var/run/docker.sock` + // to use the default, but that's not practical since it won't exist + // when the container starts, so can't use subPath on the volume mount + runnerContainer.Env = append(runnerContainer.Env, + corev1.EnvVar{ + Name: "DOCKER_HOST", + Value: "unix:///run/docker/docker.sock", + }, + ) + if ok, _ := workVolumePresent(pod.Spec.Volumes); !ok { pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ @@ -1014,9 +1044,12 @@ func newRunnerPodWithContainerMode(containerMode string, template 
corev1.Pod, ru pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ - Name: "certs-client", + Name: "docker-sock", VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: resource.NewScaledQuantity(1, resource.Mega), + }, }, }, ) @@ -1030,28 +1063,14 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru ) } - runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts, - corev1.VolumeMount{ - Name: "certs-client", - MountPath: "/certs/client", - ReadOnly: true, - }, - ) - - runnerContainer.Env = append(runnerContainer.Env, []corev1.EnvVar{ - { - Name: "DOCKER_HOST", - Value: "tcp://localhost:2376", - }, - { - Name: "DOCKER_TLS_VERIFY", - Value: "1", - }, - { - Name: "DOCKER_CERT_PATH", - Value: "/certs/client", - }, - }...) + if ok, _ := volumeMountPresent("docker-sock", runnerContainer.VolumeMounts); !ok { + runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts, + corev1.VolumeMount{ + Name: "docker-sock", + MountPath: "/run/docker", + }, + ) + } // Determine the volume mounts assigned to the docker sidecar. In case extra mounts are included in the RunnerSpec, append them to the standard // set of mounts. See https://github.com/actions/actions-runner-controller/issues/435 for context. 
@@ -1060,14 +1079,16 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru Name: runnerVolumeName, MountPath: runnerVolumeMountPath, }, - { - Name: "certs-client", - MountPath: "/certs/client", - }, } - mountPresent, _ := workVolumeMountPresent(dockerdContainer.VolumeMounts) - if !mountPresent { + if p, _ := volumeMountPresent("docker-sock", dockerdContainer.VolumeMounts); !p { + dockerVolumeMounts = append(dockerVolumeMounts, corev1.VolumeMount{ + Name: "docker-sock", + MountPath: "/run/docker", + }) + } + + if p, _ := workVolumeMountPresent(dockerdContainer.VolumeMounts); !p { dockerVolumeMounts = append(dockerVolumeMounts, corev1.VolumeMount{ Name: "work", MountPath: workDir, @@ -1078,11 +1099,6 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru dockerdContainer.Image = defaultDockerImage } - dockerdContainer.Env = append(dockerdContainer.Env, corev1.EnvVar{ - Name: "DOCKER_TLS_CERTDIR", - Value: "/certs", - }) - if dockerdContainer.SecurityContext == nil { dockerdContainer.SecurityContext = &corev1.SecurityContext{ Privileged: &privileged, @@ -1273,6 +1289,15 @@ func removeFinalizer(finalizers []string, finalizerName string) ([]string, bool) return result, removed } +func envVarPresent(name string, items []corev1.EnvVar) (bool, int) { + for index, item := range items { + if item.Name == name { + return true, index + } + } + return false, -1 +} + func workVolumePresent(items []corev1.Volume) (bool, int) { for index, item := range items { if item.Name == "work" { @@ -1283,12 +1308,16 @@ func workVolumePresent(items []corev1.Volume) (bool, int) { } func workVolumeMountPresent(items []corev1.VolumeMount) (bool, int) { + return volumeMountPresent("work", items) +} + +func volumeMountPresent(name string, items []corev1.VolumeMount) (bool, int) { for index, item := range items { - if item.Name == "work" { + if item.Name == name { return true, index } } - return false, 0 + return false, -1 } func 
applyWorkVolumeClaimTemplateToPod(pod *corev1.Pod, workVolumeClaimTemplate *v1alpha1.WorkVolumeClaimTemplate, workDir string) error { diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 48e5fc2dfa..f6681558a3 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -1096,7 +1096,6 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam if !kubernetesContainerMode { setupBuildXActionWith := &testing.With{ BuildkitdFlags: "--debug", - Endpoint: "mycontext", // As the consequence of setting `install: false`, it doesn't install buildx as an alias to `docker build` // so we need to use `docker buildx build` in the next step Install: false, @@ -1122,16 +1121,24 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam setupBuildXActionWith.Driver = "docker" dockerfile = "Dockerfile.nocache" } - steps = append(steps, - testing.Step{ + + useCustomDockerContext := os.Getenv("ARC_E2E_USE_CUSTOM_DOCKER_CONTEXT") != "" + if useCustomDockerContext { + setupBuildXActionWith.Endpoint = "mycontext" + + steps = append(steps, testing.Step{ // https://github.com/docker/buildx/issues/413#issuecomment-710660155 // To prevent setup-buildx-action from failing with: // error: could not create a builder instance with TLS data loaded from environment. 
Please use `docker context create ` to create a context for current environment and then create a builder instance with `docker buildx create ` Run: "docker context create mycontext", }, - testing.Step{ - Run: "docker context use mycontext", - }, + testing.Step{ + Run: "docker context use mycontext", + }, + ) + } + + steps = append(steps, testing.Step{ Name: "Set up Docker Buildx", Uses: "docker/setup-buildx-action@v1", From 7a39c486f7c153daeb149ed61d3746c48b5f5705 Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Tue, 28 Mar 2023 10:16:38 +0100 Subject: [PATCH 162/561] Remove containerMode from values (#2442) --- .../templates/autoscalingrunnerset.yaml | 17 ++++++------- .../templates/kube_mode_role.yaml | 5 ++-- .../templates/kube_mode_role_binding.yaml | 5 ++-- .../templates/kube_mode_serviceaccount.yaml | 5 ++-- .../no_permission_serviceaccount.yaml | 5 ++-- charts/gha-runner-scale-set/values.yaml | 24 +++++++++---------- 6 files changed, 33 insertions(+), 28 deletions(-) diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index 455a7d0b33..df43ffbb62 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -91,14 +91,15 @@ spec: {{ $key }}: {{ $val | toYaml | nindent 8 }} {{- end }} {{- end }} - {{- if eq .Values.containerMode.type "kubernetes" }} + {{- $containerMode := .Values.containerMode }} + {{- if eq $containerMode.type "kubernetes" }} serviceAccountName: {{ default (include "gha-runner-scale-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }} {{- else }} serviceAccountName: {{ default (include "gha-runner-scale-set.noPermissionServiceAccountName" .) 
.Values.template.spec.serviceAccountName }} {{- end }} - {{- if or .Values.template.spec.initContainers (eq .Values.containerMode.type "dind") }} + {{- if or .Values.template.spec.initContainers (eq $containerMode.type "dind") }} initContainers: - {{- if eq .Values.containerMode.type "dind" }} + {{- if eq $containerMode.type "dind" }} - name: init-dind-externals {{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }} {{- end }} @@ -107,13 +108,13 @@ spec: {{- end }} {{- end }} containers: - {{- if eq .Values.containerMode.type "dind" }} + {{- if eq $containerMode.type "dind" }} - name: runner {{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }} - name: dind {{- include "gha-runner-scale-set.dind-container" . | nindent 8 }} {{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }} - {{- else if eq .Values.containerMode.type "kubernetes" }} + {{- else if eq $containerMode.type "kubernetes" }} - name: runner {{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }} {{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }} @@ -121,16 +122,16 @@ spec: {{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }} {{- end }} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} - {{- if or .Values.template.spec.volumes (eq .Values.containerMode.type "dind") (eq .Values.containerMode.type "kubernetes") $tlsConfig.runnerMountPath }} + {{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") $tlsConfig.runnerMountPath }} volumes: {{- if $tlsConfig.runnerMountPath }} {{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }} {{- end }} - {{- if eq .Values.containerMode.type "dind" }} + {{- if eq $containerMode.type "dind" }} {{- include "gha-runner-scale-set.dind-volume" . | nindent 6 }} {{- include "gha-runner-scale-set.dind-work-volume" . 
| nindent 6 }} {{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }} - {{- else if eq .Values.containerMode.type "kubernetes" }} + {{- else if eq $containerMode.type "kubernetes" }} {{- include "gha-runner-scale-set.kubernetes-mode-work-volume" . | nindent 6 }} {{- include "gha-runner-scale-set.non-work-volumes" . | nindent 6 }} {{- else }} diff --git a/charts/gha-runner-scale-set/templates/kube_mode_role.yaml b/charts/gha-runner-scale-set/templates/kube_mode_role.yaml index ffc0c68e75..9f98e0dc4c 100644 --- a/charts/gha-runner-scale-set/templates/kube_mode_role.yaml +++ b/charts/gha-runner-scale-set/templates/kube_mode_role.yaml @@ -1,4 +1,5 @@ -{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} +{{- $containerMode := .Values.containerMode }} +{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} # default permission for runner pod service account in kubernetes mode (container hook) apiVersion: rbac.authorization.k8s.io/v1 kind: Role @@ -21,4 +22,4 @@ rules: - apiGroups: [""] resources: ["secrets"] verbs: ["get", "list", "create", "delete"] -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml b/charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml index bd1c634db7..fb04e7a316 100644 --- a/charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml +++ b/charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml @@ -1,4 +1,5 @@ -{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} +{{- $containerMode := .Values.containerMode }} +{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -12,4 +13,4 @@ subjects: - kind: ServiceAccount name: {{ include 
"gha-runner-scale-set.kubeModeServiceAccountName" . }} namespace: {{ .Release.Namespace }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml b/charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml index 8f180f71bd..13b31ba2ae 100644 --- a/charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml +++ b/charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml @@ -1,4 +1,5 @@ -{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} +{{- $containerMode := .Values.containerMode }} +{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} apiVersion: v1 kind: ServiceAccount metadata: @@ -6,4 +7,4 @@ metadata: namespace: {{ .Release.Namespace }} labels: {{- include "gha-runner-scale-set.labels" . | nindent 4 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml b/charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml index 3aa2d0e277..8175c874b8 100644 --- a/charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml +++ b/charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml @@ -1,4 +1,5 @@ -{{- if and (ne .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} +{{- $containerMode := .Values.containerMode }} +{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} apiVersion: v1 kind: ServiceAccount metadata: @@ -6,4 +7,4 @@ metadata: namespace: {{ .Release.Namespace }} labels: {{- include "gha-runner-scale-set.labels" . 
| nindent 4 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/gha-runner-scale-set/values.yaml b/charts/gha-runner-scale-set/values.yaml index 40fb46611c..dd6b5b894a 100644 --- a/charts/gha-runner-scale-set/values.yaml +++ b/charts/gha-runner-scale-set/values.yaml @@ -68,16 +68,16 @@ githubConfigSecret: # key: ca.pem # runnerMountPath: /usr/local/share/ca-certificates/ -containerMode: - type: "" ## type can be set to dind or kubernetes - ## the following is required when containerMode.type=kubernetes - # kubernetesModeWorkVolumeClaim: - # accessModes: ["ReadWriteOnce"] - # # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to provide dynamic provision volume with storageClassName: openebs-hostpath - # storageClassName: "dynamic-blob-storage" - # resources: - # requests: - # storage: 1Gi +# containerMode: +# type: "dind" ## type can be set to dind or kubernetes +# ## the following is required when containerMode.type=kubernetes +# kubernetesModeWorkVolumeClaim: +# accessModes: ["ReadWriteOnce"] +# # For local testing, use https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md to provide dynamic provision volume with storageClassName: openebs-hostpath +# storageClassName: "dynamic-blob-storage" +# resources: +# requests: +# storage: 1Gi ## template is the PodSpec for each runner Pod template: @@ -161,7 +161,7 @@ template: image: ghcr.io/actions/actions-runner:latest command: ["/home/runner/run.sh"] -## Optional controller service account that needs to have required Role and RoleBinding +## Optional controller service account that needs to have required Role and RoleBinding ## to operate this gha-runner-scale-set installation. ## The helm chart will try to find the controller deployment and its service account at installation time. 
## In case the helm chart can't find the right service account, you can explicitly pass in the following value @@ -169,4 +169,4 @@ template: ## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly. # controllerServiceAccount: # namespace: arc-system -# name: test-arc-gha-runner-scale-set-controller \ No newline at end of file +# name: test-arc-gha-runner-scale-set-controller From b0221f9b898467f4958f1b0066484de5afb8441d Mon Sep 17 00:00:00 2001 From: Milas Bowman Date: Tue, 28 Mar 2023 21:40:10 -0400 Subject: [PATCH 163/561] Install Docker Compose v2 as a Docker CLI plugin (#2326) Co-authored-by: Yusuke Kuoka --- ...tions-runner-dind-rootless.ubuntu-20.04.dockerfile | 8 ++++++-- ...tions-runner-dind-rootless.ubuntu-22.04.dockerfile | 9 ++++++--- runner/actions-runner-dind.ubuntu-20.04.dockerfile | 8 ++++++-- runner/actions-runner-dind.ubuntu-22.04.dockerfile | 8 ++++++-- runner/actions-runner.ubuntu-20.04.dockerfile | 8 ++++++-- runner/actions-runner.ubuntu-22.04.dockerfile | 8 ++++++-- test/e2e/e2e_test.go | 11 +++++++++++ 7 files changed, 47 insertions(+), 13 deletions(-) diff --git a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile index 1ee6f2b899..33d3c3d22e 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile @@ -139,8 +139,12 @@ RUN export SKIP_IPTABLES=1 \ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ && if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \ - && curl -fLo /home/runner/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ - && chmod +x /home/runner/bin/docker-compose + && mkdir -p /home/runner/.docker/cli-plugins \ + && curl -fLo 
/home/runner/.docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ + && chmod +x /home/runner/.docker/cli-plugins/docker-compose \ + && ln -s /home/runner/.docker/cli-plugins/docker-compose /home/runner/bin/docker-compose \ + && which docker-compose \ + && docker compose version ENTRYPOINT ["/bin/bash", "-c"] CMD ["entrypoint-dind-rootless.sh"] diff --git a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile index b5f02d9088..3e35d183fd 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile @@ -116,9 +116,12 @@ RUN export SKIP_IPTABLES=1 \ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ && if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \ - && mkdir -p /home/runner/bin \ - && curl -fLo /home/runner/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-Linux-${ARCH} \ - && chmod +x /home/runner/bin/docker-compose + && mkdir -p /home/runner/.docker/cli-plugins \ + && curl -fLo /home/runner/.docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ + && chmod +x /home/runner/.docker/cli-plugins/docker-compose \ + && ln -s /home/runner/.docker/cli-plugins/docker-compose /home/runner/bin/docker-compose \ + && which docker-compose \ + && docker compose version ENTRYPOINT ["/bin/bash", "-c"] CMD ["entrypoint-dind-rootless.sh"] diff --git a/runner/actions-runner-dind.ubuntu-20.04.dockerfile b/runner/actions-runner-dind.ubuntu-20.04.dockerfile index da19b4b896..053ccc1c1b 100644 --- a/runner/actions-runner-dind.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-20.04.dockerfile @@ 
-106,8 +106,12 @@ RUN set -vx; \ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ && if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \ - && curl -fLo /usr/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ - && chmod +x /usr/bin/docker-compose + && mkdir -p /usr/libexec/docker/cli-plugins \ + && curl -fLo /usr/libexec/docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ + && chmod +x /usr/libexec/docker/cli-plugins/docker-compose \ + && ln -s /usr/libexec/docker/cli-plugins/docker-compose /usr/bin/docker-compose \ + && which docker-compose \ + && docker compose version # We place the scripts in `/usr/bin` so that users who extend this image can # override them with scripts of the same name placed in `/usr/local/bin`. diff --git a/runner/actions-runner-dind.ubuntu-22.04.dockerfile b/runner/actions-runner-dind.ubuntu-22.04.dockerfile index 3532f2a514..6ee33dd236 100644 --- a/runner/actions-runner-dind.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-22.04.dockerfile @@ -82,8 +82,12 @@ RUN set -vx; \ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ && if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \ - && curl -fLo /usr/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ - && chmod +x /usr/bin/docker-compose + && mkdir -p /usr/libexec/docker/cli-plugins \ + && curl -fLo /usr/libexec/docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ + && chmod +x /usr/libexec/docker/cli-plugins/docker-compose \ + && ln -s 
/usr/libexec/docker/cli-plugins/docker-compose /usr/bin/docker-compose \ + && which docker-compose \ + && docker compose version # We place the scripts in `/usr/bin` so that users who extend this image can # override them with scripts of the same name placed in `/usr/local/bin`. diff --git a/runner/actions-runner.ubuntu-20.04.dockerfile b/runner/actions-runner.ubuntu-20.04.dockerfile index 5c246a0cd0..83d55bbab6 100644 --- a/runner/actions-runner.ubuntu-20.04.dockerfile +++ b/runner/actions-runner.ubuntu-20.04.dockerfile @@ -103,8 +103,12 @@ RUN set -vx; \ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ && if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \ - && curl -fLo /usr/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ - && chmod +x /usr/bin/docker-compose + && mkdir -p /usr/libexec/docker/cli-plugins \ + && curl -fLo /usr/libexec/docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ + && chmod +x /usr/libexec/docker/cli-plugins/docker-compose \ + && ln -s /usr/libexec/docker/cli-plugins/docker-compose /usr/bin/docker-compose \ + && which docker-compose \ + && docker compose version # We place the scripts in `/usr/bin` so that users who extend this image can # override them with scripts of the same name placed in `/usr/local/bin`. 
diff --git a/runner/actions-runner.ubuntu-22.04.dockerfile b/runner/actions-runner.ubuntu-22.04.dockerfile index 966856fbba..28a61eb8c7 100644 --- a/runner/actions-runner.ubuntu-22.04.dockerfile +++ b/runner/actions-runner.ubuntu-22.04.dockerfile @@ -80,8 +80,12 @@ RUN set -vx; \ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \ && if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \ - && curl -fLo /usr/bin/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ - && chmod +x /usr/bin/docker-compose + && mkdir -p /usr/libexec/docker/cli-plugins \ + && curl -fLo /usr/libexec/docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \ + && chmod +x /usr/libexec/docker/cli-plugins/docker-compose \ + && ln -s /usr/libexec/docker/cli-plugins/docker-compose /usr/bin/docker-compose \ + && which docker-compose \ + && docker compose version # We place the scripts in `/usr/bin` so that users who extend this image can # override them with scripts of the same name placed in `/usr/local/bin`. 
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index f6681558a3..ef925ccc09 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -1081,6 +1081,17 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam }, }, ) + + // Ensure both the alias and the full command work after + // https://github.com/actions/actions-runner-controller/pull/2326 + steps = append(steps, + testing.Step{ + Run: "docker-compose version", + }, + testing.Step{ + Run: "docker compose version", + }, + ) } steps = append(steps, From a1e4bad4ca7f0e0d0e70d2c706c65b85ce2438d5 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Thu, 30 Mar 2023 10:10:18 +0900 Subject: [PATCH 164/561] chart: Bump version to 0.23.0 (#2449) --- charts/actions-runner-controller/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/actions-runner-controller/Chart.yaml b/charts/actions-runner-controller/Chart.yaml index 4d8ff15d0b..b48ac505c4 100644 --- a/charts/actions-runner-controller/Chart.yaml +++ b/charts/actions-runner-controller/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.22.0 +version: 0.23.0 # Used as the default manager tag value when no tag property is provided in the values.yaml -appVersion: 0.27.0 +appVersion: 0.27.1 home: https://github.com/actions/actions-runner-controller From d5aa72640b84dc86b08dde04ee64d5badce52c9e Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Thu, 30 Mar 2023 18:46:29 +0900 Subject: [PATCH 165/561] Fix chart publishing workflow to not throw away releases between the latest and 0.21.0 (#2453) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- .github/workflows/publish-chart.yaml | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish-chart.yaml b/.github/workflows/publish-chart.yaml index aaae2828ad..a307a36e8f 100644 --- a/.github/workflows/publish-chart.yaml +++ b/.github/workflows/publish-chart.yaml @@ -20,7 +20,7 @@ env: HELM_VERSION: v3.8.0 permissions: - contents: read + contents: write jobs: lint-chart: @@ -173,10 +173,28 @@ jobs: --pages-branch 'gh-pages' \ --pages-index-path 'index.yaml' + # This step is required to not throw away changes made to the index.yaml on every new chart release. + # + # We update the index.yaml in the actions-runner-controller.github.io repo + # by appending the new chart version to the index.yaml saved in actions-runner-controller repo + # and copying and commiting the updated index.yaml to the github.io one. + # See below for more context: + # - https://github.com/actions-runner-controller/actions-runner-controller.github.io/pull/2 + # - https://github.com/actions/actions-runner-controller/pull/2452 + - name: Commit and push to actions/actions-runner-controller + run: | + git checkout gh-pages + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + git add . 
+ git commit -m "Update index.yaml" + git push + working-directory: ${{ github.workspace }} + # Chart Release was never intended to publish to a different repo # this workaround is intended to move the index.yaml to the target repo # where the github pages are hosted - - name: Checkout pages repository + - name: Checkout target repository uses: actions/checkout@v3 with: repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }} @@ -188,7 +206,7 @@ jobs: run: | cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml - - name: Commit and push + - name: Commit and push to target repository run: | git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" From 5867113361f4fbf8cfac1cde193a73ef6abec60a Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Thu, 30 Mar 2023 15:40:28 +0200 Subject: [PATCH 166/561] Fix bug preventing env variables from being specified (#2450) Co-authored-by: Tingluo Huang --- .../templates/deployment.yaml | 9 +-- .../tests/template_test.go | 56 ++++++++++++++++++- .../values.yaml | 51 ++++++++++------- 3 files changed, 88 insertions(+), 28 deletions(-) diff --git a/charts/gha-runner-scale-set-controller/templates/deployment.yaml b/charts/gha-runner-scale-set-controller/templates/deployment.yaml index b239040a8f..b624d963b5 100644 --- a/charts/gha-runner-scale-set-controller/templates/deployment.yaml +++ b/charts/gha-runner-scale-set-controller/templates/deployment.yaml @@ -69,13 +69,8 @@ spec: fieldRef: fieldPath: metadata.namespace {{- with .Values.env }} - {{- if kindIs "slice" .Values.env }} - {{- toYaml .Values.env | nindent 8 }} - {{- else }} - {{- range $key, $val := .Values.env }} - - name: {{ $key }} - value: {{ $val | quote }} - {{- end }} + {{- if kindIs "slice" . }} + {{- toYaml . 
| nindent 8 }} {{- end }} {{- end }} {{- with .Values.resources }} diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index 96b671eb0f..3ee12f7df0 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -390,6 +390,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { "imagePullSecrets[0].name": "dockerhub", "nameOverride": "gha-runner-scale-set-controller-override", "fullnameOverride": "gha-runner-scale-set-controller-fullname-override", + "env[0].name": "ENV_VAR_NAME_1", + "env[0].value": "ENV_VAR_VALUE_1", "serviceAccount.name": "gha-runner-scale-set-controller-sa", "podAnnotations.foo": "bar", "podSecurityContext.fsGroup": "1000", @@ -432,6 +434,9 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"]) assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"]) + assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name) + assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) + assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1) assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name) assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName) @@ -467,10 +472,13 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3) assert.Equal(t, 
"CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value) + assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name) + assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) + assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) @@ -704,6 +712,52 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) { assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath) } +func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller") + require.NoError(t, err) + + releaseName := "test-arc" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "env[0].Name": "ENV_VAR_NAME_1", + "env[0].Value": "ENV_VAR_VALUE_1", + "env[1].Name": "ENV_VAR_NAME_2", + "env[1].ValueFrom.SecretKeyRef.Key": "ENV_VAR_NAME_2", + "env[1].ValueFrom.SecretKeyRef.Name": "secret-name", + "env[1].ValueFrom.SecretKeyRef.Optional": "true", + "env[2].Name": "ENV_VAR_NAME_3", + "env[2].Value": "", + "env[3].Name": "ENV_VAR_NAME_4", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"}) + + var deployment appsv1.Deployment + helm.UnmarshalK8SYaml(t, output, &deployment) + + assert.Equal(t, namespaceName, deployment.Namespace) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name) + + assert.Len(t, 
deployment.Spec.Template.Spec.Containers[0].Env, 6) + assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name) + assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) + assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[3].Name) + assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Name) + assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Key) + assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Optional) + assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[4].Name) + assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[4].Value) + assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[5].Name) + assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].ValueFrom) +} + func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) { t.Parallel() diff --git a/charts/gha-runner-scale-set-controller/values.yaml b/charts/gha-runner-scale-set-controller/values.yaml index 055d68e933..03359b5f6a 100644 --- a/charts/gha-runner-scale-set-controller/values.yaml +++ b/charts/gha-runner-scale-set-controller/values.yaml @@ -18,6 +18,17 @@ imagePullSecrets: [] nameOverride: "" fullnameOverride: "" +env: +## Define environment variables for the controller pod +# - name: "ENV_VAR_NAME_1" +# value: "ENV_VAR_VALUE_1" +# - name: "ENV_VAR_NAME_2" +# valueFrom: +# secretKeyRef: +# key: ENV_VAR_NAME_2 +# name: secret-name +# optional: true + serviceAccount: # Specifies whether a service account should be created for running the controller pod create: true @@ -31,27 +42,27 @@ serviceAccount: podAnnotations: {} podSecurityContext: {} - # fsGroup: 2000 +# fsGroup: 2000 securityContext: {} - # capabilities: - # drop: - # - ALL - # 
readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 +# capabilities: +# drop: +# - ALL +# readOnlyRootFilesystem: true +# runAsNonRoot: true +# runAsUser: 1000 resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi nodeSelector: {} @@ -69,6 +80,6 @@ flags: # Defaults to "debug". logLevel: "debug" - # Restricts the controller to only watch resources in the desired namespace. - # Defaults to watch all namespaces when unset. - # watchSingleNamespace: "" \ No newline at end of file + ## Restricts the controller to only watch resources in the desired namespace. + ## Defaults to watch all namespaces when unset. 
+ # watchSingleNamespace: "" From 0e72a340b12fd1208ba84489c7bc52cdb98fdef3 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Fri, 31 Mar 2023 20:42:25 +0900 Subject: [PATCH 167/561] actions-metrics: Do our best not to fail the whole event processing on no API creds (#2459) --- pkg/actionsmetrics/event_reader.go | 35 ++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/pkg/actionsmetrics/event_reader.go b/pkg/actionsmetrics/event_reader.go index 1874d4ae11..12b5f9b737 100644 --- a/pkg/actionsmetrics/event_reader.go +++ b/pkg/actionsmetrics/event_reader.go @@ -136,12 +136,27 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in // job_conclusion -> (neutral, success, skipped, cancelled, timed_out, action_required, failure) githubWorkflowJobConclusionsTotal.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Inc() - parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e) - if err != nil { - log.Error(err, "reading workflow job log") - return - } else { - log.Info("reading workflow_job logs", keysAndValues...) + var ( + exitCode = "na" + runTimeSeconds *float64 + ) + + // We need to do our best not to fail the whole event processing + // when the user provided no GitHub API credentials. 
+ // See https://github.com/actions/actions-runner-controller/issues/2424 + if reader.GitHubClient != nil { + parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e) + if err != nil { + log.Error(err, "reading workflow job log") + return + } + + exitCode = parseResult.ExitCode + + s := parseResult.RunTime.Seconds() + runTimeSeconds = &s + + log.WithValues(keysAndValues...).Info("reading workflow_job logs", "exit_code", exitCode) } if *e.WorkflowJob.Conclusion == "failure" { @@ -167,18 +182,20 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in } if *conclusion == "timed_out" { failedStep = fmt.Sprint(i) - parseResult.ExitCode = "timed_out" + exitCode = "timed_out" break } } githubWorkflowJobFailuresTotal.With( extraLabel("failed_step", failedStep, - extraLabel("exit_code", parseResult.ExitCode, labels), + extraLabel("exit_code", exitCode, labels), ), ).Inc() } - githubWorkflowJobRunDurationSeconds.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Observe(parseResult.RunTime.Seconds()) + if runTimeSeconds != nil { + githubWorkflowJobRunDurationSeconds.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Observe(*runTimeSeconds) + } } } From dc5782b77142b78b52b8c6e2f9aa5e9f37db06c0 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Fri, 31 Mar 2023 10:31:25 -0400 Subject: [PATCH 168/561] Add E2E test to assert self-signed CA support. 
(#2458) --- .github/workflows/e2e-test-linux-vm.yaml | 116 +++++++++++++++++++++++ charts/gha-runner-scale-set/values.yaml | 2 +- 2 files changed, 117 insertions(+), 1 deletion(-) diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index a054149bc8..b7d5cf1349 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -565,3 +565,119 @@ jobs: arc-name: ${{steps.install_arc.outputs.ARC_NAME}} arc-namespace: "arc-runners" arc-controller-namespace: "arc-systems" + + self-signed-ca-setup: + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id + env: + WORKFLOW_FILE: "arc-test-workflow.yaml" + steps: + - uses: actions/checkout@v3 + with: + ref: ${{github.head_ref}} + + - uses: ./.github/actions/setup-arc-e2e + id: setup + with: + app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}} + app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}} + image-name: ${{env.IMAGE_NAME}} + image-tag: ${{env.IMAGE_VERSION}} + target-org: ${{env.TARGET_ORG}} + + - name: Install gha-runner-scale-set-controller + id: install_arc_controller + run: | + helm install arc \ + --namespace "arc-systems" \ + --create-namespace \ + --set image.repository=${{ env.IMAGE_NAME }} \ + --set image.tag=${{ env.IMAGE_VERSION }} \ + ./charts/gha-runner-scale-set-controller \ + --debug + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller + kubectl get pod -n arc-systems + kubectl describe deployment 
arc-gha-runner-scale-set-controller -n arc-systems + + - name: Install gha-runner-scale-set + id: install_arc + run: | + docker run -d \ + --rm \ + --name mitmproxy \ + --publish 8080:8080 \ + -v ${{ github.workspace }}/mitmproxy:/home/mitmproxy/.mitmproxy \ + mitmproxy/mitmproxy:latest \ + mitmdump + count=0 + while true; do + if [ -f "${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem" ]; then + echo "CA cert generated" + cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem + break + fi + if [ "$count" -ge 10 ]; then + echo "Timeout waiting for mitmproxy generate its CA cert" + exit 1 + fi + sleep 1 + done + sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt + sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt + kubectl create namespace arc-runners + kubectl -n arc-runners create configmap ca-cert --from-file="${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt" + kubectl -n arc-runners get configmap ca-cert -o yaml + ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1)) + helm install "$ARC_NAME" \ + --namespace "arc-runners" \ + --create-namespace \ + --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \ + --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ + --set proxy.https.url="http://host.minikube.internal:8080" \ + --set "proxy.noProxy[0]=10.96.0.1:443" \ + --set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert" \ + --set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.crt" \ + --set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \ + ./charts/gha-runner-scale-set \ + --debug + echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + 
if [ "$count" -ge 10 ]; then + echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" + exit 1 + fi + sleep 1 + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME + kubectl get pod -n arc-systems + + - name: Test ARC E2E + uses: ./.github/actions/execute-assert-arc-e2e + timeout-minutes: 10 + with: + auth-token: ${{ steps.setup.outputs.token }} + repo-owner: ${{ env.TARGET_ORG }} + repo-name: ${{env.TARGET_REPO}} + workflow-file: ${{env.WORKFLOW_FILE}} + arc-name: ${{steps.install_arc.outputs.ARC_NAME}} + arc-namespace: "arc-runners" + arc-controller-namespace: "arc-systems" diff --git a/charts/gha-runner-scale-set/values.yaml b/charts/gha-runner-scale-set/values.yaml index dd6b5b894a..bbd58ac851 100644 --- a/charts/gha-runner-scale-set/values.yaml +++ b/charts/gha-runner-scale-set/values.yaml @@ -65,7 +65,7 @@ githubConfigSecret: # certificateFrom: # configMapKeyRef: # name: config-map-name -# key: ca.pem +# key: ca.crt # runnerMountPath: /usr/local/share/ca-certificates/ # containerMode: From 24af2420b5209e2c82b38159ade6cdcc46ac9ec6 Mon Sep 17 00:00:00 2001 From: Stewart Thomson <108886656+sthomson-wyn@users.noreply.github.com> Date: Sun, 2 Apr 2023 20:06:59 -0400 Subject: [PATCH 169/561] Check if appID and instID are non-empty before attempting to parseInt (#2463) --- .../actions.summerwind.net/multi_githubclient.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/controllers/actions.summerwind.net/multi_githubclient.go b/controllers/actions.summerwind.net/multi_githubclient.go index 912a410ad5..4c96deb6f5 100644 --- a/controllers/actions.summerwind.net/multi_githubclient.go +++ b/controllers/actions.summerwind.net/multi_githubclient.go @@ -285,16 +285,20 @@ func secretDataToGitHubClientConfig(data map[string][]byte) (*github.Config, err appID := string(data["github_app_id"]) - conf.AppID, err = strconv.ParseInt(appID, 
10, 64) - if err != nil { - return nil, err + if appID != "" { + conf.AppID, err = strconv.ParseInt(appID, 10, 64) + if err != nil { + return nil, err + } } instID := string(data["github_app_installation_id"]) - conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64) - if err != nil { - return nil, err + if instID != "" { + conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64) + if err != nil { + return nil, err + } } conf.AppPrivateKey = string(data["github_app_private_key"]) From 293c4156a32d53045c86bfa22cd9efb931ada594 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 3 Apr 2023 13:01:15 +0200 Subject: [PATCH 170/561] Add troubleshooting advice (#2456) --- .../gha-runner-scale-set-controller/README.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index 3c6f5e5099..50c1ada836 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -149,6 +149,24 @@ Upgrading actions-runner-controller requires a few extra steps because CRDs will ## Troubleshooting +### I'm using the charts from the `master` branch and the controller is not working + +The `master` branch is highly unstable! We offer no guarantees that the charts in the `master` branch will work at any given time. If you're using the charts from the `master` branch, you should expect to encounter issues. Please use the latest release instead. + +### Controller pod is running but the runner set listener pod is not + +You need to inspect the logs of the controller first and see if there are any errors. 
If there are no errors, and the runner set listener pod is still not running, you need to make sure that the **controller pod has access to the Kubernetes API server in your cluster!** + +You'll see something similar to the following in the logs of the controller pod: + +```log +kubectl logs -c manager +17:35:28.661069 1 request.go:690] Waited for 1.032376652s due to client-side throttling, not priority and fairness, request: GET:https://10.0.0.1:443/apis/monitoring.coreos.com/v1alpha1?timeout=32s +2023-03-15T17:35:29Z INFO starting manager +``` + +If you have a proxy configured or you're using a sidecar proxy that's automatically injected (think [Istio](https://istio.io/)), you need to make sure it's configured appropriately to allow traffic from the controller container (manager) to the Kubernetes API server. + ### Check the logs You can check the logs of the controller pod using the following command: From 64d8e16feee896c753495eb9eb587eba7bd051ed Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Mon, 3 Apr 2023 21:06:12 +0200 Subject: [PATCH 171/561] Fix helm uninstall cleanup by adding finalizers and cleaning them from the controller (#2433) Co-authored-by: Tingluo Huang --- .github/workflows/validate-gha-chart.yaml | 4 +- .../templates/manager_cluster_role.yaml | 1 + ...ager_single_namespace_controller_role.yaml | 2 +- .../manager_single_namespace_watch_role.yaml | 3 +- .../templates/_helpers.tpl | 18 +- .../templates/autoscalingrunnerset.yaml | 15 + .../templates/githubsecret.yaml | 4 +- .../templates/kube_mode_role.yaml | 2 + .../templates/kube_mode_role_binding.yaml | 4 +- .../templates/kube_mode_serviceaccount.yaml | 2 + .../templates/manager_role.yaml | 18 +- .../templates/manager_role_binding.yaml | 9 +- .../no_permission_serviceaccount.yaml | 2 + .../tests/template_test.go | 129 +++++- .../autoscalingrunnerset_controller.go | 400 +++++++++++++++++- .../autoscalingrunnerset_controller_test.go | 371 +++++++++++++++- 
.../actions.github.com/resourcebuilder.go | 11 + 17 files changed, 958 insertions(+), 37 deletions(-) diff --git a/.github/workflows/validate-gha-chart.yaml b/.github/workflows/validate-gha-chart.yaml index 0d54f6e27c..645b32e9d7 100644 --- a/.github/workflows/validate-gha-chart.yaml +++ b/.github/workflows/validate-gha-chart.yaml @@ -71,7 +71,7 @@ jobs: git clone https://github.com/helm/chart-testing cd chart-testing unset CT_CONFIG_DIR - goreleaser build --clean --skip-validate + goreleaser build --clean --skip-validate ./dist/chart-testing_linux_amd64_v1/ct version echo 'Adding ct directory to PATH...' echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH" @@ -107,7 +107,7 @@ jobs: load: true build-args: | DOCKER_IMAGE_NAME=test-arc - VERSION=dev + VERSION=dev tags: | test-arc:dev cache-from: type=gha diff --git a/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml index 3ea3127902..0ee3bb5395 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml @@ -133,4 +133,5 @@ rules: verbs: - list - watch + - patch {{- end }} diff --git a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role.yaml index a72dc7387d..7fd6e98850 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_controller_role.yaml @@ -81,4 +81,4 @@ rules: verbs: - list - watch -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml 
b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml index bf840bcf41..f195da55cc 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml @@ -114,4 +114,5 @@ rules: verbs: - list - watch -{{- end }} \ No newline at end of file + - patch +{{- end }} diff --git a/charts/gha-runner-scale-set/templates/_helpers.tpl b/charts/gha-runner-scale-set/templates/_helpers.tpl index 45e7794535..202bb04d58 100644 --- a/charts/gha-runner-scale-set/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set/templates/_helpers.tpl @@ -11,17 +11,9 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this If release name contains chart name it will be used as a full name. */}} {{- define "gha-runner-scale-set.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} +{{- $name := default .Chart.Name }} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} -{{- end }} -{{- end }} {{/* Create chart name and version as used by the chart label. @@ -41,6 +33,8 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/part-of: gha-runner-scale-set +actions.github.com/scale-set-name: {{ .Release.Name }} +actions.github.com/scale-set-namespace: {{ .Release.Namespace }} {{- end }} {{/* @@ -71,6 +65,10 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role {{- end }} +{{- define "gha-runner-scale-set.kubeModeRoleBindingName" -}} +{{- include "gha-runner-scale-set.fullname" . 
}}-kube-mode-role-binding +{{- end }} + {{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}} {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-service-account {{- end }} @@ -433,7 +431,7 @@ volumeMounts: {{- include "gha-runner-scale-set.fullname" . }}-manager-role {{- end }} -{{- define "gha-runner-scale-set.managerRoleBinding" -}} +{{- define "gha-runner-scale-set.managerRoleBindingName" -}} {{- include "gha-runner-scale-set.fullname" . }}-manager-role-binding {{- end }} diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index df43ffbb62..9e52e24952 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -12,6 +12,21 @@ metadata: labels: app.kubernetes.io/component: "autoscaling-runner-set" {{- include "gha-runner-scale-set.labels" . | nindent 4 }} + annotations: + {{- $containerMode := .Values.containerMode }} + {{- if not (kindIs "string" .Values.githubConfigSecret) }} + actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }} + {{- end }} + actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }} + actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }} + {{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} + actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }} + actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }} + actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . 
}} + {{- end }} + {{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} + actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }} + {{- end }} spec: githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }} githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }} diff --git a/charts/gha-runner-scale-set/templates/githubsecret.yaml b/charts/gha-runner-scale-set/templates/githubsecret.yaml index 03411486ac..67282c182b 100644 --- a/charts/gha-runner-scale-set/templates/githubsecret.yaml +++ b/charts/gha-runner-scale-set/templates/githubsecret.yaml @@ -7,7 +7,7 @@ metadata: labels: {{- include "gha-runner-scale-set.labels" . | nindent 4 }} finalizers: - - actions.github.com/secret-protection + - actions.github.com/cleanup-protection data: {{- $hasToken := false }} {{- $hasAppId := false }} @@ -36,4 +36,4 @@ data: {{- if and $hasAppId (or (not $hasInstallationId) (not $hasPrivateKey)) }} {{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key." }} {{- end }} -{{- end}} \ No newline at end of file +{{- end}} diff --git a/charts/gha-runner-scale-set/templates/kube_mode_role.yaml b/charts/gha-runner-scale-set/templates/kube_mode_role.yaml index 9f98e0dc4c..e82d7b7713 100644 --- a/charts/gha-runner-scale-set/templates/kube_mode_role.yaml +++ b/charts/gha-runner-scale-set/templates/kube_mode_role.yaml @@ -6,6 +6,8 @@ kind: Role metadata: name: {{ include "gha-runner-scale-set.kubeModeRoleName" . 
}} namespace: {{ .Release.Namespace }} + finalizers: + - actions.github.com/cleanup-protection rules: - apiGroups: [""] resources: ["pods"] diff --git a/charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml b/charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml index fb04e7a316..060b9399e5 100644 --- a/charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml +++ b/charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml @@ -3,8 +3,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }} + name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }} namespace: {{ .Release.Namespace }} + finalizers: + - actions.github.com/cleanup-protection roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml b/charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml index 13b31ba2ae..a32eceef4a 100644 --- a/charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml +++ b/charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml @@ -5,6 +5,8 @@ kind: ServiceAccount metadata: name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }} namespace: {{ .Release.Namespace }} + finalizers: + - actions.github.com/cleanup-protection labels: {{- include "gha-runner-scale-set.labels" . | nindent 4 }} {{- end }} diff --git a/charts/gha-runner-scale-set/templates/manager_role.yaml b/charts/gha-runner-scale-set/templates/manager_role.yaml index 6f4cd9a67e..f6a1e49383 100644 --- a/charts/gha-runner-scale-set/templates/manager_role.yaml +++ b/charts/gha-runner-scale-set/templates/manager_role.yaml @@ -3,6 +3,11 @@ kind: Role metadata: name: {{ include "gha-runner-scale-set.managerRoleName" . }} namespace: {{ .Release.Namespace }} + labels: + {{- include "gha-runner-scale-set.labels" . 
| nindent 4 }} + app.kubernetes.io/component: manager-role + finalizers: + - actions.github.com/cleanup-protection rules: - apiGroups: - "" @@ -29,6 +34,17 @@ rules: - list - patch - update +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update - apiGroups: - rbac.authorization.k8s.io resources: @@ -56,4 +72,4 @@ rules: - configmaps verbs: - get -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/gha-runner-scale-set/templates/manager_role_binding.yaml b/charts/gha-runner-scale-set/templates/manager_role_binding.yaml index ba38ab0f0b..ce212f77e2 100644 --- a/charts/gha-runner-scale-set/templates/manager_role_binding.yaml +++ b/charts/gha-runner-scale-set/templates/manager_role_binding.yaml @@ -1,8 +1,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ include "gha-runner-scale-set.managerRoleBinding" . }} + name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }} namespace: {{ .Release.Namespace }} + labels: + {{- include "gha-runner-scale-set.labels" . | nindent 4 }} + app.kubernetes.io/component: manager-role-binding + finalizers: + - actions.github.com/cleanup-protection roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -10,4 +15,4 @@ roleRef: subjects: - kind: ServiceAccount name: {{ include "gha-runner-scale-set.managerServiceAccountName" . | nindent 4 }} - namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }} \ No newline at end of file + namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . 
| nindent 4 }} diff --git a/charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml b/charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml index 8175c874b8..f7c9700f1a 100644 --- a/charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml +++ b/charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml @@ -7,4 +7,6 @@ metadata: namespace: {{ .Release.Namespace }} labels: {{- include "gha-runner-scale-set.labels" . | nindent 4 }} + finalizers: + - actions.github.com/cleanup-protection {{- end }} diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index 9c3692eed7..8676da6684 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -1,11 +1,13 @@ package tests import ( + "fmt" "path/filepath" "strings" "testing" v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com" "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" @@ -43,7 +45,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) { assert.Equal(t, namespaceName, githubSecret.Namespace) assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", githubSecret.Name) assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"])) - assert.Equal(t, "actions.github.com/secret-protection", githubSecret.Finalizers[0]) + assert.Equal(t, "actions.github.com/cleanup-protection", githubSecret.Finalizers[0]) } func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) { @@ -188,6 +190,7 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &ars) assert.Equal(t, 
"test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName) + assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) // no finalizer protections in place } func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { @@ -217,6 +220,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { assert.Equal(t, namespaceName, serviceAccount.Namespace) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name) + assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0]) output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"}) var role rbacv1.Role @@ -224,6 +228,9 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { assert.Equal(t, namespaceName, role.Namespace) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name) + + assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0]) + assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules") assert.Equal(t, "pods", role.Rules[0].Resources[0]) assert.Equal(t, "pods/exec", role.Rules[1].Resources[0]) @@ -236,18 +243,21 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &roleBinding) assert.Equal(t, namespaceName, roleBinding.Namespace) - assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role-binding", roleBinding.Name) assert.Len(t, roleBinding.Subjects, 1) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name) assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name) assert.Equal(t, "Role", roleBinding.RoleRef.Kind) + 
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0]) output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) var ars v1alpha1.AutoscalingRunnerSet helm.UnmarshalK8SYaml(t, output, &ars) - assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName) + expectedServiceAccountName := "test-runners-gha-runner-scale-set-kube-mode-service-account" + assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName) + assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) } func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) { @@ -279,6 +289,7 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &ars) assert.Equal(t, "test-service-account", ars.Spec.Template.Spec.ServiceAccountName) + assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) } func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) { @@ -1458,7 +1469,11 @@ func TestTemplate_CreateManagerRole(t *testing.T) { assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release") assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name) - assert.Equal(t, 5, len(managerRole.Rules)) + assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0]) + assert.Equal(t, 6, len(managerRole.Rules)) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) } func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) { @@ -1489,8 +1504,9 @@ func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) { assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release") assert.Equal(t, 
"test-runners-gha-runner-scale-set-manager-role", managerRole.Name) - assert.Equal(t, 6, len(managerRole.Rules)) - assert.Equal(t, "configmaps", managerRole.Rules[5].Resources[0]) + assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0]) + assert.Equal(t, 7, len(managerRole.Rules)) + assert.Equal(t, "configmaps", managerRole.Rules[6].Resources[0]) } func TestTemplate_CreateManagerRoleBinding(t *testing.T) { @@ -1521,6 +1537,7 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) { assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release") assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name) assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name) + assert.Equal(t, "actions.github.com/cleanup-protection", managerRoleBinding.Finalizers[0]) assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name) assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace) } @@ -1692,3 +1709,103 @@ func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T) assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others") assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others") } + +func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + annotationExpectedTests := map[string]*helm.Options{ + "GitHub token": { + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + 
"controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + }, + "GitHub app": { + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_app_id": "10", + "githubConfigSecret.github_app_installation_id": "100", + "githubConfigSecret.github_app_private_key": "private_key", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + }, + } + + for name, options := range annotationExpectedTests { + t.Run("Annotation set: "+name, func(t *testing.T) { + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet) + + assert.NotEmpty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName]) + }) + } + + t.Run("Annotation should not be set", func(t *testing.T) { + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret": "pre-defined-secret", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet) + + assert.Empty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName]) + }) +} + +func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := 
filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + SetValues: map[string]string{ + "githubConfigUrl": "https://github.com/actions", + "githubConfigSecret.github_token": "gh_token12345", + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + "containerMode.type": "kubernetes", + }, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet) + + annotationValues := map[string]string{ + actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-runner-scale-set-github-secret", + actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-runner-scale-set-manager-role", + actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-runner-scale-set-manager-role-binding", + actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-runner-scale-set-kube-mode-service-account", + actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-runner-scale-set-kube-mode-role", + actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-runner-scale-set-kube-mode-role-binding", + } + + for annotation, value := range annotationValues { + assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation)) + } +} diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 43c13823b1..903acb06e3 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ 
b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -27,6 +27,7 @@ import ( "github.com/actions/actions-runner-controller/github/actions" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -42,11 +43,12 @@ import ( const ( // TODO: Replace with shared image. - autoscalingRunnerSetOwnerKey = ".metadata.controller" - LabelKeyRunnerSpecHash = "runner-spec-hash" - autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" - runnerScaleSetIdAnnotationKey = "runner-scale-set-id" - runnerScaleSetNameAnnotationKey = "runner-scale-set-name" + autoscalingRunnerSetOwnerKey = ".metadata.controller" + LabelKeyRunnerSpecHash = "runner-spec-hash" + autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" + runnerScaleSetIdAnnotationKey = "runner-scale-set-id" + runnerScaleSetNameAnnotationKey = "runner-scale-set-name" + autoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection" ) // AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object @@ -113,6 +115,17 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, err } + requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log) + if err != nil { + log.Error(err, "Failed to remove finalizers on dependent resources") + return ctrl.Result{}, err + } + + if requeue { + log.Info("Waiting for dependent resources to be deleted") + return ctrl.Result{Requeue: true}, nil + } + log.Info("Removing finalizer") err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName) @@ -305,6 +318,29 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C return nil } +func 
(r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) { + c := autoscalingRunnerSetFinalizerDependencyCleaner{ + client: r.Client, + autoscalingRunnerSet: autoscalingRunnerSet, + logger: logger, + } + + c.removeKubernetesModeRoleBindingFinalizer(ctx) + c.removeKubernetesModeRoleFinalizer(ctx) + c.removeKubernetesModeServiceAccountFinalizer(ctx) + c.removeNoPermissionServiceAccountFinalizer(ctx) + c.removeGitHubSecretFinalizer(ctx) + c.removeManagerRoleBindingFinalizer(ctx) + c.removeManagerRoleFinalizer(ctx) + + requeue, err = c.result() + if err != nil { + logger.Error(err, "Failed to cleanup finalizer from dependent resource") + return true, err + } + return requeue, nil +} + func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { logger.Info("Creating a new runner scale set") actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) @@ -467,12 +503,28 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co } func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error { + scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey] + if !ok { + // Annotation not being present can occur in 3 scenarios + // 1. Scale set is never created. + // In this case, we don't need to fetch the actions client to delete the scale set that does not exist + // + // 2. The scale set has been deleted by the controller. + // In that case, the controller will clean up annotation because the scale set does not exist anymore. 
+ // Removal of the scale set id is also useful because permission cleanup will eventually lose permission + // assigned to it on a GitHub secret, causing actions client from secret to result in permission denied + // + // 3. Annotation is removed manually. + // In this case, the controller will treat this as if the scale set is being removed from the actions service + // Then, manual deletion of the scale set is required. + return nil + } logger.Info("Deleting the runner scale set from Actions service") - runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) + runnerScaleSetId, err := strconv.Atoi(scaleSetId) if err != nil { - // If the annotation is not set correctly, or if it does not exist, we are going to get stuck in a loop trying to parse the scale set id. - // If the configuration is invalid (secret does not exist for example), we never get to the point to create runner set. But then, manual cleanup - // would get stuck finalizing the resource trying to parse annotation indefinitely + // If the annotation is not set correctly, we are going to get stuck in a loop trying to parse the scale set id. + // If the configuration is invalid (secret does not exist for example), we never got to the point to create runner set. + // But then, manual cleanup would get stuck finalizing the resource trying to parse annotation indefinitely logger.Info("autoscaling runner set does not have annotation describing scale set id. 
Skip deletion", "err", err.Error()) return nil } @@ -489,6 +541,14 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex return err } + err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { + delete(obj.Annotations, runnerScaleSetIdAnnotationKey) + }) + if err != nil { + logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIdAnnotationKey) + return err + } + logger.Info("Deleted the runner scale set from Actions service") return nil } @@ -658,6 +718,328 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro Complete(r) } +type autoscalingRunnerSetFinalizerDependencyCleaner struct { + // configuration fields + client client.Client + autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet + logger logr.Logger + + // fields to operate on + requeue bool + err error +} + +func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) { + return c.requeue, c.err +} + +func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) { + if c.requeue || c.err != nil { + return + } + + roleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName] + if !ok { + c.logger.Info( + "Skipping cleaning up kubernetes mode service account", + "reason", + fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleBindingName), + ) + return + } + + c.logger.Info("Removing finalizer from container mode kubernetes role binding", "name", roleBindingName) + + roleBinding := new(rbacv1.RoleBinding) + err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding) + switch { + case err == nil: + if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) { + c.logger.Info("Kubernetes mode role binding finalizer 
has already been removed", "name", roleBindingName) + return + } + err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) { + controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + }) + if err != nil { + c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err) + return + } + c.requeue = true + c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName) + return + case err != nil && !kerrors.IsNotFound(err): + c.err = fmt.Errorf("failed to fetch kubernetes mode role binding: %w", err) + return + default: + c.logger.Info("Container mode kubernetes role binding has already been deleted", "name", roleBindingName) + return + } +} + +func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) { + if c.requeue || c.err != nil { + return + } + + roleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName] + if !ok { + c.logger.Info( + "Skipping cleaning up kubernetes mode role", + "reason", + fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleName), + ) + return + } + + c.logger.Info("Removing finalizer from container mode kubernetes role", "name", roleName) + role := new(rbacv1.Role) + err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role) + switch { + case err == nil: + if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) { + c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName) + return + } + err = patch(ctx, c.client, role, func(obj *rbacv1.Role) { + controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + }) + if err != nil { + c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err) + return + } + c.requeue = true + c.logger.Info("Removed finalizer from container mode 
kubernetes role") + return + case err != nil && !kerrors.IsNotFound(err): + c.err = fmt.Errorf("failed to fetch kubernetes mode role: %w", err) + return + default: + c.logger.Info("Container mode kubernetes role has already been deleted", "name", roleName) + return + } +} + +func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) { + if c.requeue || c.err != nil { + return + } + + serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName] + if !ok { + c.logger.Info( + "Skipping cleaning up kubernetes mode role binding", + "reason", + fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeServiceAccountName), + ) + return + } + + c.logger.Info("Removing finalizer from container mode kubernetes service account", "name", serviceAccountName) + + serviceAccount := new(corev1.ServiceAccount) + err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount) + switch { + case err == nil: + if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) { + c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName) + return + } + err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) { + controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + }) + if err != nil { + c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err) + return + } + c.requeue = true + c.logger.Info("Removed finalizer from container mode kubernetes service account") + return + case err != nil && !kerrors.IsNotFound(err): + c.err = fmt.Errorf("failed to fetch kubernetes mode service account: %w", err) + return + default: + c.logger.Info("Container mode kubernetes service account has already been deleted", "name", serviceAccountName) + return 
+ } +} + +func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) { + if c.requeue || c.err != nil { + return + } + + serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName] + if !ok { + c.logger.Info( + "Skipping cleaning up no permission service account", + "reason", + fmt.Sprintf("annotation key %q not present", AnnotationKeyNoPermissionServiceAccountName), + ) + return + } + + c.logger.Info("Removing finalizer from no permission service account", "name", serviceAccountName) + + serviceAccount := new(corev1.ServiceAccount) + err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount) + switch { + case err == nil: + if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) { + c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName) + return + } + err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) { + controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + }) + if err != nil { + c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err) + return + } + c.requeue = true + c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName) + return + case err != nil && !kerrors.IsNotFound(err): + c.err = fmt.Errorf("failed to fetch service account: %w", err) + return + default: + c.logger.Info("No permission service account has already been deleted", "name", serviceAccountName) + return + } +} + +func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) { + if c.requeue || c.err != nil { + return + } + + githubSecretName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName] + if !ok { + c.logger.Info( + "Skipping cleaning up no 
permission service account", + "reason", + fmt.Sprintf("annotation key %q not present", AnnotationKeyGitHubSecretName), + ) + return + } + + c.logger.Info("Removing finalizer from GitHub secret", "name", githubSecretName) + + githubSecret := new(corev1.Secret) + err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret) + switch { + case err == nil: + if !controllerutil.ContainsFinalizer(githubSecret, autoscalingRunnerSetCleanupFinalizerName) { + c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName) + return + } + err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) { + controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + }) + if err != nil { + c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err) + return + } + c.requeue = true + c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName) + return + case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err): + c.err = fmt.Errorf("failed to fetch GitHub secret: %w", err) + return + default: + c.logger.Info("GitHub secret has already been deleted", "name", githubSecretName) + return + } +} + +func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) { + if c.requeue || c.err != nil { + return + } + + managerRoleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName] + if !ok { + c.logger.Info( + "Skipping cleaning up manager role binding", + "reason", + fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleBindingName), + ) + return + } + + c.logger.Info("Removing finalizer from manager role binding", "name", managerRoleBindingName) + + roleBinding := new(rbacv1.RoleBinding) + err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding) 
+ switch { + case err == nil: + if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) { + c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName) + return + } + err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) { + controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + }) + if err != nil { + c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err) + return + } + c.requeue = true + c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName) + return + case err != nil && !kerrors.IsNotFound(err): + c.err = fmt.Errorf("failed to fetch manager role binding: %w", err) + return + default: + c.logger.Info("Manager role binding has already been deleted", "name", managerRoleBindingName) + return + } +} + +func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) { + if c.requeue || c.err != nil { + return + } + + managerRoleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName] + if !ok { + c.logger.Info( + "Skipping cleaning up manager role", + "reason", + fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleName), + ) + return + } + + c.logger.Info("Removing finalizer from manager role", "name", managerRoleName) + + role := new(rbacv1.Role) + err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role) + switch { + case err == nil: + if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) { + c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName) + return + } + err = patch(ctx, c.client, role, func(obj *rbacv1.Role) { + controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + }) + if err != nil { + c.err = fmt.Errorf("failed to patch manager role without 
finalizer: %w", err) + return + } + c.requeue = true + c.logger.Info("Removed finalizer from manager role", "name", managerRoleName) + return + case err != nil && !kerrors.IsNotFound(err): + c.err = fmt.Errorf("failed to fetch manager role: %w", err) + return + default: + c.logger.Info("Manager role has already been deleted", "name", managerRoleName) + return + } +} + // NOTE: if this is logic should be used for other resources, // consider using generics type EphemeralRunnerSets struct { diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index 2fd0e61b00..6ad2f18a1e 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -13,6 +13,7 @@ import ( "time" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -23,6 +24,7 @@ import ( . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/github/actions" @@ -571,6 +573,7 @@ var _ = Describe("Test AutoScalingController updates", func() { update := autoscalingRunnerSet.DeepCopy() update.Spec.RunnerScaleSetName = "testset_update" + err = k8sClient.Patch(ctx, update, client.MergeFrom(autoscalingRunnerSet)) Expect(err).NotTo(HaveOccurred(), "failed to update AutoScalingRunnerSet") @@ -1036,7 +1039,7 @@ var _ = Describe("Test Client optional configuration", func() { g.Expect(listener.Spec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "listener does not have TLS config") }, autoscalingRunnerSetTestTimeout, - autoscalingListenerTestInterval, + autoscalingRunnerSetTestInterval, ).Should(Succeed(), "tls config is incorrect") }) @@ -1093,8 +1096,372 @@ var _ = Describe("Test Client optional configuration", func() { g.Expect(runnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "EphemeralRunnerSpec does not have TLS config") }, autoscalingRunnerSetTestTimeout, - autoscalingListenerTestInterval, + autoscalingRunnerSetTestInterval, ).Should(Succeed()) }) }) }) + +var _ = Describe("Test external permissions cleanup", func() { + It("Should clean up kubernetes mode permissions", func() { + ctx := context.Background() + autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient) + + configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) + + controller := &AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ControllerNamespace: autoscalingNS.Name, + DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", + ActionsClient: fake.NewMultiClient(), + } + err := 
controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + startManagers(GinkgoT(), mgr) + + min := 1 + max := 10 + autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + Labels: map[string]string{ + "app.kubernetes.io/name": "gha-runner-scale-set", + }, + Annotations: map[string]string{ + AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding", + AnnotationKeyKubernetesModeRoleName: "kube-mode-role", + AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account", + }, + }, + Spec: v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + MaxRunners: &max, + MinRunners: &min, + RunnerGroup: "testgroup", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName], + Namespace: autoscalingRunnerSet.Namespace, + Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + }, + } + + err = k8sClient.Create(ctx, role) + Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role") + + serviceAccount := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName], + Namespace: autoscalingRunnerSet.Namespace, + Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + }, + } + + err = k8sClient.Create(ctx, serviceAccount) + Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode service account") + + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName], + Namespace: 
autoscalingRunnerSet.Namespace, + Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: serviceAccount.Name, + Namespace: serviceAccount.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + // Kind is the type of resource being referenced + Kind: "Role", + Name: role.Name, + }, + } + + err = k8sClient.Create(ctx, roleBinding) + Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role binding") + + err = k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + Eventually( + func() (string, error) { + created := new(v1alpha1.AutoscalingRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created) + if err != nil { + return "", err + } + if len(created.Finalizers) == 0 { + return "", nil + } + return created.Finalizers[0], nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer") + + err = k8sClient.Delete(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set") + + err = k8sClient.Delete(ctx, roleBinding) + Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role binding") + + err = k8sClient.Delete(ctx, role) + Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role") + + err = k8sClient.Delete(ctx, serviceAccount) + Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode service account") + + Eventually( + func() bool { + r := new(rbacv1.RoleBinding) + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: roleBinding.Name, + Namespace: roleBinding.Namespace, + }, r) + + return errors.IsNotFound(err) + }, + autoscalingRunnerSetTestTimeout, + 
autoscalingRunnerSetTestInterval, + ).Should(BeTrue(), "Expected role binding to be cleaned up") + + Eventually( + func() bool { + r := new(rbacv1.Role) + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: role.Name, + Namespace: role.Namespace, + }, r) + + return errors.IsNotFound(err) + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeTrue(), "Expected role to be cleaned up") + }) + + It("Should clean up manager permissions and no-permission service account", func() { + ctx := context.Background() + autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient) + + controller := &AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ControllerNamespace: autoscalingNS.Name, + DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", + ActionsClient: fake.NewMultiClient(), + } + err := controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + startManagers(GinkgoT(), mgr) + + min := 1 + max := 10 + autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + Labels: map[string]string{ + "app.kubernetes.io/name": "gha-runner-scale-set", + }, + Annotations: map[string]string{ + AnnotationKeyManagerRoleName: "manager-role", + AnnotationKeyManagerRoleBindingName: "manager-role-binding", + AnnotationKeyGitHubSecretName: "gh-secret-name", + AnnotationKeyNoPermissionServiceAccountName: "no-permission-sa", + }, + }, + Spec: v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + MaxRunners: &max, + MinRunners: &min, + RunnerGroup: "testgroup", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName], + Namespace: autoscalingRunnerSet.Namespace, + Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + }, + Data: map[string][]byte{ + "github_token": []byte(defaultGitHubToken), + }, + } + + err = k8sClient.Create(context.Background(), secret) + Expect(err).NotTo(HaveOccurred(), "failed to create github secret") + + autoscalingRunnerSet.Spec.GitHubConfigSecret = secret.Name + + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName], + Namespace: autoscalingRunnerSet.Namespace, + Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + }, + } + + err = k8sClient.Create(ctx, role) + Expect(err).NotTo(HaveOccurred(), "failed to create manager role") + + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName], + Namespace: autoscalingRunnerSet.Namespace, + Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: role.Name, + }, + } + + err = k8sClient.Create(ctx, roleBinding) + Expect(err).NotTo(HaveOccurred(), "failed to create manager role binding") + + noPermissionServiceAccount := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName], + Namespace: autoscalingRunnerSet.Namespace, + Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + }, + } + + err = k8sClient.Create(ctx, noPermissionServiceAccount) + Expect(err).NotTo(HaveOccurred(), "failed to create no permission service account") + + err = k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") + + Eventually( + func() (string, error) { + created := new(v1alpha1.AutoscalingRunnerSet) + err := k8sClient.Get(ctx, 
client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created) + if err != nil { + return "", err + } + if len(created.Finalizers) == 0 { + return "", nil + } + return created.Finalizers[0], nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer") + + err = k8sClient.Delete(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set") + + err = k8sClient.Delete(ctx, noPermissionServiceAccount) + Expect(err).NotTo(HaveOccurred(), "failed to delete no permission service account") + + err = k8sClient.Delete(ctx, secret) + Expect(err).NotTo(HaveOccurred(), "failed to delete GitHub secret") + + err = k8sClient.Delete(ctx, roleBinding) + Expect(err).NotTo(HaveOccurred(), "failed to delete manager role binding") + + err = k8sClient.Delete(ctx, role) + Expect(err).NotTo(HaveOccurred(), "failed to delete manager role") + + Eventually( + func() bool { + r := new(corev1.ServiceAccount) + err := k8sClient.Get( + ctx, + types.NamespacedName{ + Name: noPermissionServiceAccount.Name, + Namespace: noPermissionServiceAccount.Namespace, + }, + r, + ) + return errors.IsNotFound(err) + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeTrue(), "Expected no permission service account to be cleaned up") + + Eventually( + func() bool { + r := new(corev1.Secret) + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: secret.Name, + Namespace: secret.Namespace, + }, r) + + return errors.IsNotFound(err) + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeTrue(), "Expected role binding to be cleaned up") + + Eventually( + func() bool { + r := new(rbacv1.RoleBinding) + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: roleBinding.Name, + Namespace: roleBinding.Namespace, + }, r) + + return 
errors.IsNotFound(err) + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeTrue(), "Expected role binding to be cleaned up") + + Eventually( + func() bool { + r := new(rbacv1.Role) + err := k8sClient.Get( + ctx, + types.NamespacedName{ + Name: role.Name, + Namespace: role.Namespace, + }, + r, + ) + + return errors.IsNotFound(err) + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeTrue(), "Expected role to be cleaned up") + }) +}) diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index 4ca6abc695..0ea81461e2 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -43,6 +43,17 @@ const ( labelKeyListenerNamespace = "auto-scaling-listener-namespace" ) +// Annotations applied for later cleanup of resources +const ( + AnnotationKeyManagerRoleBindingName = "actions.github.com/cleanup-manager-role-binding" + AnnotationKeyManagerRoleName = "actions.github.com/cleanup-manager-role-name" + AnnotationKeyKubernetesModeRoleName = "actions.github.com/cleanup-kubernetes-mode-role-name" + AnnotationKeyKubernetesModeRoleBindingName = "actions.github.com/cleanup-kubernetes-mode-role-binding-name" + AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name" + AnnotationKeyGitHubSecretName = "actions.github.com/cleanup-github-secret-name" + AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name" +) + var commonLabelKeys = [...]string{ LabelKeyKubernetesPartOf, LabelKeyKubernetesComponent, From 95bc4f3ce098e7ee799e99ec1f451605adbbc525 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Tue, 4 Apr 2023 19:07:20 +0200 Subject: [PATCH 172/561] Add ImagePullPolicy to the AutoscalingListener, configurable through Manager env (#2477) --- .../v1alpha1/autoscalinglistener_types.go | 3 
++ ...tions.github.com_autoscalinglisteners.yaml | 3 ++ .../templates/deployment.yaml | 2 + .../tests/template_test.go | 45 +++++++++++-------- ...tions.github.com_autoscalinglisteners.yaml | 3 ++ config/manager/manager.yaml | 2 + .../actions.github.com/resourcebuilder.go | 18 +++++++- main.go | 7 +++ 8 files changed, 64 insertions(+), 19 deletions(-) diff --git a/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go b/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go index 8245865787..c5fedd7b06 100644 --- a/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go +++ b/apis/actions.github.com/v1alpha1/autoscalinglistener_types.go @@ -52,6 +52,9 @@ type AutoscalingListenerSpec struct { // Required Image string `json:"image,omitempty"` + // Required + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + // Required ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` diff --git a/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalinglisteners.yaml b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalinglisteners.yaml index 6df9c05192..d75ef5fe21 100644 --- a/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalinglisteners.yaml +++ b/charts/gha-runner-scale-set-controller/crds/actions.github.com_autoscalinglisteners.yaml @@ -80,6 +80,9 @@ spec: image: description: Required type: string + imagePullPolicy: + description: Required + type: string imagePullSecrets: description: Required items: diff --git a/charts/gha-runner-scale-set-controller/templates/deployment.yaml b/charts/gha-runner-scale-set-controller/templates/deployment.yaml index b624d963b5..1997d2b0c2 100644 --- a/charts/gha-runner-scale-set-controller/templates/deployment.yaml +++ b/charts/gha-runner-scale-set-controller/templates/deployment.yaml @@ -68,6 +68,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY + 
value: "{{ .Values.image.pullPolicy | default "IfNotPresent" }}" {{- with .Values.env }} {{- if kindIs "slice" . }} {{- toYaml . | nindent 8 }} diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index 3ee12f7df0..469cdecf44 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -349,13 +349,16 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value) assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) + assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name) + assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. 
Needs to align with controllers/actions.github.com/resourcebuilder.go + assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources) assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1) @@ -434,8 +437,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"]) assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"]) - assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name) - assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) + assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name) + assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value) assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1) assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name) @@ -472,12 +475,15 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value) - assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name) - assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) + assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", 
deployment.Spec.Template.Spec.Containers[0].Env[2].Name) + assert.Equal(t, "Always", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go + + assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name) + assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value) assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) @@ -698,13 +704,16 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) { assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value) assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name) assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath) + assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name) + assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. 
Needs to align with controllers/actions.github.com/resourcebuilder.go + assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources) assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1) @@ -745,17 +754,17 @@ func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) { assert.Equal(t, namespaceName, deployment.Namespace) assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 6) - assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Name) - assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) - assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[3].Name) - assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Name) - assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Key) - assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[3].ValueFrom.SecretKeyRef.Optional) - assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[4].Name) - assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[4].Value) - assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[5].Name) - assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].ValueFrom) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7) + assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name) + assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value) + assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].Name) + assert.Equal(t, "secret-name", 
deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Name) + assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Key) + assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Optional) + assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[5].Name) + assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].Value) + assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[6].Name) + assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[6].ValueFrom) } func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) { diff --git a/config/crd/bases/actions.github.com_autoscalinglisteners.yaml b/config/crd/bases/actions.github.com_autoscalinglisteners.yaml index 6df9c05192..d75ef5fe21 100644 --- a/config/crd/bases/actions.github.com_autoscalinglisteners.yaml +++ b/config/crd/bases/actions.github.com_autoscalinglisteners.yaml @@ -80,6 +80,9 @@ spec: image: description: Required type: string + imagePullPolicy: + description: Required + type: string imagePullSecrets: description: Required items: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f90df347f2..fb63c83a01 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -56,6 +56,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY + value: IfNotPresent volumeMounts: - name: controller-manager mountPath: "/etc/actions-runner-controller" diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index 0ea81461e2..2ddba11b13 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -67,6 +67,21 @@ var commonLabelKeys = [...]string{ const labelValueKubernetesPartOf = "gha-runner-scale-set" 
+const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent + +// scaleSetListenerImagePullPolicy is applied to all listeners +var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy + +func SetListenerImagePullPolicy(pullPolicy string) bool { + switch p := corev1.PullPolicy(pullPolicy); p { + case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent: + scaleSetListenerImagePullPolicy = p + return true + default: + return false + } +} + type resourceBuilder struct{} func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod { @@ -161,7 +176,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A Name: autoscalingListenerContainerName, Image: autoscalingListener.Spec.Image, Env: listenerEnv, - ImagePullPolicy: corev1.PullIfNotPresent, + ImagePullPolicy: autoscalingListener.Spec.ImagePullPolicy, Command: []string{ "/github-runnerscaleset-listener", }, @@ -375,6 +390,7 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1. 
MinRunners: effectiveMinRunners, MaxRunners: effectiveMaxRunners, Image: image, + ImagePullPolicy: scaleSetListenerImagePullPolicy, ImagePullSecrets: imagePullSecrets, Proxy: autoscalingRunnerSet.Spec.Proxy, GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS, diff --git a/main.go b/main.go index ac9a79c542..aee0322572 100644 --- a/main.go +++ b/main.go @@ -170,6 +170,13 @@ func main() { } } + listenerPullPolicy := os.Getenv("CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY") + if ok := actionsgithubcom.SetListenerImagePullPolicy(listenerPullPolicy); ok { + log.Info("AutoscalingListener image pull policy changed", "ImagePullPolicy", listenerPullPolicy) + } else { + log.Info("Using default AutoscalingListener image pull policy", "ImagePullPolicy", actionsgithubcom.DefaultScaleSetListenerImagePullPolicy) + } + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, NewCache: newCache, From c83d9c3ef4b4f9bb738b7fe5914089677b41cbcc Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 4 Apr 2023 14:43:45 -0400 Subject: [PATCH 173/561] Treat `.ghe.com` domain as hosted environment (#2480) Co-authored-by: Nikola Jokic --- github/actions/config.go | 37 +++++++++++++++++++++------------ github/actions/config_test.go | 39 ++++++++++++++++++++++++++++++++++- 2 files changed, 62 insertions(+), 14 deletions(-) diff --git a/github/actions/config.go b/github/actions/config.go index f19cb17f4c..50f23213ba 100644 --- a/github/actions/config.go +++ b/github/actions/config.go @@ -3,6 +3,7 @@ package actions import ( "fmt" "net/url" + "os" "strings" ) @@ -34,9 +35,7 @@ func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) { return nil, err } - isHosted := u.Host == "github.com" || - u.Host == "www.github.com" || - u.Host == "github.localhost" + isHosted := isHostedGitHubURL(u) configURL := &GitHubConfig{ ConfigURL: u, @@ -76,23 +75,35 @@ func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) { func (c *GitHubConfig) GitHubAPIURL(path 
string) *url.URL { result := &url.URL{ Scheme: c.ConfigURL.Scheme, + Host: c.ConfigURL.Host, // default for Enterprise mode + Path: "/api/v3", // default for Enterprise mode } - switch c.ConfigURL.Host { - // Hosted - case "github.com", "github.localhost": + isHosted := isHostedGitHubURL(c.ConfigURL) + + if isHosted { result.Host = fmt.Sprintf("api.%s", c.ConfigURL.Host) - // re-routing www.github.com to api.github.com - case "www.github.com": - result.Host = "api.github.com" + result.Path = "" - // Enterprise - default: - result.Host = c.ConfigURL.Host - result.Path = "/api/v3" + if strings.EqualFold("www.github.com", c.ConfigURL.Host) { + // re-routing www.github.com to api.github.com + result.Host = "api.github.com" + } } result.Path += path return result } + +func isHostedGitHubURL(u *url.URL) bool { + _, forceGhes := os.LookupEnv("GITHUB_ACTIONS_FORCE_GHES") + if forceGhes { + return false + } + + return strings.EqualFold(u.Host, "github.com") || + strings.EqualFold(u.Host, "www.github.com") || + strings.EqualFold(u.Host, "github.localhost") || + strings.HasSuffix(u.Host, ".ghe.com") +} diff --git a/github/actions/config_test.go b/github/actions/config_test.go index e21f7e9e94..99b6459b3e 100644 --- a/github/actions/config_test.go +++ b/github/actions/config_test.go @@ -3,6 +3,7 @@ package actions_test import ( "errors" "net/url" + "os" "strings" "testing" @@ -117,6 +118,16 @@ func TestGitHubConfig(t *testing.T) { IsHosted: false, }, }, + { + configURL: "https://my-ghes.ghe.com/org/", + expected: &actions.GitHubConfig{ + Scope: actions.GitHubScopeOrganization, + Enterprise: "", + Organization: "org", + Repository: "", + IsHosted: true, + }, + }, } for _, test := range tests { @@ -151,9 +162,35 @@ func TestGitHubConfig_GitHubAPIURL(t *testing.T) { t.Run("when hosted", func(t *testing.T) { config, err := actions.ParseGitHubConfigFromURL("https://github.com/org/repo") require.NoError(t, err) + assert.True(t, config.IsHosted) result := 
config.GitHubAPIURL("/some/path") assert.Equal(t, "https://api.github.com/some/path", result.String()) }) - t.Run("when not hosted", func(t *testing.T) {}) + t.Run("when hosted with ghe.com", func(t *testing.T) { + config, err := actions.ParseGitHubConfigFromURL("https://github.ghe.com/org/repo") + require.NoError(t, err) + assert.True(t, config.IsHosted) + + result := config.GitHubAPIURL("/some/path") + assert.Equal(t, "https://api.github.ghe.com/some/path", result.String()) + }) + t.Run("when not hosted", func(t *testing.T) { + config, err := actions.ParseGitHubConfigFromURL("https://ghes.com/org/repo") + require.NoError(t, err) + assert.False(t, config.IsHosted) + + result := config.GitHubAPIURL("/some/path") + assert.Equal(t, "https://ghes.com/api/v3/some/path", result.String()) + }) + t.Run("when not hosted with ghe.com", func(t *testing.T) { + os.Setenv("GITHUB_ACTIONS_FORCE_GHES", "1") + defer os.Unsetenv("GITHUB_ACTIONS_FORCE_GHES") + config, err := actions.ParseGitHubConfigFromURL("https://test.ghe.com/org/repo") + require.NoError(t, err) + assert.False(t, config.IsHosted) + + result := config.GitHubAPIURL("/some/path") + assert.Equal(t, "https://test.ghe.com/api/v3/some/path", result.String()) + }) } From 1bc80dab4b6ac4a7c42a9f4861c44a39f32021f7 Mon Sep 17 00:00:00 2001 From: Tingluo Huang Date: Tue, 4 Apr 2023 15:15:11 -0400 Subject: [PATCH 174/561] Remove deprecated method. 
(#2481) --- testing/random.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/testing/random.go b/testing/random.go index 1795138bba..560cc8dd2b 100644 --- a/testing/random.go +++ b/testing/random.go @@ -5,17 +5,17 @@ import ( "time" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - const letterBytes = "abcdefghijklmnopqrstuvwxyz" +var ( + random = rand.New(rand.NewSource(time.Now().UnixNano())) +) + // Copied from https://stackoverflow.com/a/31832326 with thanks func RandStringBytesRmndr(n int) string { b := make([]byte, n) for i := range b { - b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))] + b[i] = letterBytes[random.Int63()%int64(len(letterBytes))] } return string(b) } From 9c205213307862ffe4f8cc6a9cfb71ed6681ce46 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Wed, 5 Apr 2023 14:56:27 +0200 Subject: [PATCH 175/561] gha-runner-scale-set 0.4.0 release (#2467) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- .../Chart.yaml | 4 +- charts/gha-runner-scale-set/Chart.yaml | 4 +- .../gha-runner-scale-set-controller/README.md | 41 ++++++++++++++++--- 3 files changed, 39 insertions(+), 10 deletions(-) diff --git a/charts/gha-runner-scale-set-controller/Chart.yaml b/charts/gha-runner-scale-set-controller/Chart.yaml index 114c431126..90b6685e53 100644 --- a/charts/gha-runner-scale-set-controller/Chart.yaml +++ b/charts/gha-runner-scale-set-controller/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.0 +version: 0.4.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. 
They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.3.0" +appVersion: "0.4.0" home: https://github.com/actions/actions-runner-controller diff --git a/charts/gha-runner-scale-set/Chart.yaml b/charts/gha-runner-scale-set/Chart.yaml index df3a4a9ab6..bbac8c6dc9 100644 --- a/charts/gha-runner-scale-set/Chart.yaml +++ b/charts/gha-runner-scale-set/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.0 +version: 0.4.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.3.0" +appVersion: "0.4.0" home: https://github.com/actions/dev-arc diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index 50c1ada836..2153414d85 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -36,7 +36,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 --namespace "${NAMESPACE}" \ --create-namespace \ oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \ - --version 0.3.0 + --version 0.4.0 ``` 1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app). 
@@ -57,7 +57,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 --create-namespace \ --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ --set githubConfigSecret.github_token="${GITHUB_PAT}" \ - oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.3.0 + oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0 ``` ```bash @@ -75,7 +75,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 --set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \ --set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \ --set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \ - oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.3.0 + oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0 ``` 1. Check your installation. If everything went well, you should see the following: @@ -84,8 +84,8 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7 $ helm list -n "${NAMESPACE}" NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION - arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed gha-runner-scale-set-controller-0.3.0 preview - arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed gha-runner-scale-set-0.3.0 0.3.0 + arc arc-systems 1 2023-01-18 10:03:36.610534934 +0000 UTC deployed gha-runner-scale-set-controller-0.4.0 preview + arc-runner-set arc-systems 1 2023-01-18 10:20:14.795285645 +0000 UTC deployed gha-runner-scale-set-0.4.0 0.4.0 ``` ```bash @@ -140,7 +140,7 @@ Upgrading actions-runner-controller requires a few extra steps because CRDs will ```bash helm pull oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \ - --version 0.3.0 \ + --version 0.4.0 \ --untar && \ kubectl replace -f /gha-runner-scale-set-controller/crds/ ``` 
@@ -237,6 +237,35 @@ To fix this, you can either: ## Changelog +### v0.4.0 + +#### ⚠️ Warning + +This release contains a major change related to the way permissions are +applied to the manager ([#2276](https://github.com/actions/actions-runner-controller/pull/2276) and [#2363](https://github.com/actions/actions-runner-controller/pull/2363)). + +Please evaluate these changes carefully before upgrading. + +#### Major changes + +1. Surface EphemeralRunnerSet stats to AutoscalingRunnerSet [#2382](https://github.com/actions/actions-runner-controller/pull/2382) +1. Improved security posture by removing list/watch secrets permission from manager cluster role + [#2276](https://github.com/actions/actions-runner-controller/pull/2276) +1. Improved security posture by delaying role/rolebinding creation to gha-runner-scale-set during installation + [#2363](https://github.com/actions/actions-runner-controller/pull/2363) +1. Improved security posture by supporting watching a single namespace from the controller + [#2374](https://github.com/actions/actions-runner-controller/pull/2374) +1. Added labels to AutoscalingRunnerSet subresources to allow easier inspection [#2391](https://github.com/actions/actions-runner-controller/pull/2391) +1. Fixed bug preventing env variables from being specified + [#2450](https://github.com/actions/actions-runner-controller/pull/2450) +1. Enhance quickstart troubleshooting guides + [#2435](https://github.com/actions/actions-runner-controller/pull/2435) +1. Fixed ignore extra dind container when container mode type is "dind" + [#2418](https://github.com/actions/actions-runner-controller/pull/2418) +1. Added additional cleanup finalizers [#2433](https://github.com/actions/actions-runner-controller/pull/2433) +1. gha-runner-scale-set listener pod inherits the ImagePullPolicy from the manager pod [#2477](https://github.com/actions/actions-runner-controller/pull/2477) +1. 
Treat `.ghe.com` domain as hosted environment [#2480](https://github.com/actions/actions-runner-controller/pull/2480) + ### v0.3.0 #### Major changes From 3a9bcda0990ae21627f86c079023c2a2355094c9 Mon Sep 17 00:00:00 2001 From: Hidetake Iwata Date: Wed, 5 Apr 2023 22:39:29 +0900 Subject: [PATCH 176/561] chart: Bump version to 0.23.1 (#2483) --- charts/actions-runner-controller/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/actions-runner-controller/Chart.yaml b/charts/actions-runner-controller/Chart.yaml index b48ac505c4..16906dc286 100644 --- a/charts/actions-runner-controller/Chart.yaml +++ b/charts/actions-runner-controller/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.23.0 +version: 0.23.1 # Used as the default manager tag value when no tag property is provided in the values.yaml -appVersion: 0.27.1 +appVersion: 0.27.2 home: https://github.com/actions/actions-runner-controller From 74b41ab8df74b6549b75693a77e6c5aada276fcd Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Thu, 6 Apr 2023 01:20:12 +0900 Subject: [PATCH 177/561] Fix chart publishing workflow (#2487) --- .github/workflows/publish-chart.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/publish-chart.yaml b/.github/workflows/publish-chart.yaml index a307a36e8f..0d2b5d2c7b 100644 --- a/.github/workflows/publish-chart.yaml +++ b/.github/workflows/publish-chart.yaml @@ -183,7 +183,9 @@ jobs: # - https://github.com/actions/actions-runner-controller/pull/2452 - name: Commit and push to actions/actions-runner-controller run: | + git stash git checkout gh-pages + git stash pop git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" git add . 
From d326d88dcebadbdaec6981eabfd6d9ba869dc8bd Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Thu, 6 Apr 2023 14:01:48 +0200 Subject: [PATCH 178/561] Fix the publish chart workflow (#2489) Co-authored-by: Nikola Jokic --- .github/workflows/publish-chart.yaml | 360 +++++++++++++-------------- 1 file changed, 171 insertions(+), 189 deletions(-) diff --git a/.github/workflows/publish-chart.yaml b/.github/workflows/publish-chart.yaml index 0d2b5d2c7b..bdc796d69e 100644 --- a/.github/workflows/publish-chart.yaml +++ b/.github/workflows/publish-chart.yaml @@ -5,15 +5,21 @@ name: Publish Helm Chart on: push: branches: - - master + - master paths: - - 'charts/**' - - '.github/workflows/publish-chart.yaml' - - '!charts/actions-runner-controller/docs/**' - - '!charts/gha-runner-scale-set-controller/**' - - '!charts/gha-runner-scale-set/**' - - '!**.md' + - 'charts/**' + - '.github/workflows/publish-chart.yaml' + - '!charts/actions-runner-controller/docs/**' + - '!charts/gha-runner-scale-set-controller/**' + - '!charts/gha-runner-scale-set/**' + - '!**.md' workflow_dispatch: + inputs: + force: + description: 'Force publish even if the chart version is not bumped' + type: boolean + required: true + default: false env: KUBE_SCORE_VERSION: 1.10.0 @@ -29,91 +35,86 @@ jobs: outputs: publish-chart: ${{ steps.publish-chart-step.outputs.publish }} steps: - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set up Helm - uses: azure/setup-helm@v3.4 - with: - version: ${{ env.HELM_VERSION }} - - - name: Set up kube-score - run: | - wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score - chmod 755 kube-score - - - name: Kube-score generated manifests - run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - - --ignore-test pod-networkpolicy - --ignore-test 
deployment-has-poddisruptionbudget - --ignore-test deployment-has-host-podantiaffinity - --ignore-test container-security-context - --ignore-test pod-probes - --ignore-test container-image-tag - --enable-optional-test container-security-context-privileged - --enable-optional-test container-security-context-readonlyrootfilesystem - - # python is a requirement for the chart-testing action below (supports yamllint among other tests) - - uses: actions/setup-python@v4 - with: - python-version: '3.7' - - - name: Set up chart-testing - uses: helm/chart-testing-action@v2.3.1 - - - name: Run chart-testing (list-changed) - id: list-changed - run: | - changed=$(ct list-changed --config charts/.ci/ct-config.yaml) - if [[ -n "$changed" ]]; then - echo "::set-output name=changed::true" - fi - - - name: Run chart-testing (lint) - run: | - ct lint --config charts/.ci/ct-config.yaml - - - name: Create kind cluster - if: steps.list-changed.outputs.changed == 'true' - uses: helm/kind-action@v1.4.0 - - # We need cert-manager already installed in the cluster because we assume the CRDs exist - - name: Install cert-manager - if: steps.list-changed.outputs.changed == 'true' - run: | - helm repo add jetstack https://charts.jetstack.io --force-update - helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait - - - name: Run chart-testing (install) - if: steps.list-changed.outputs.changed == 'true' - run: ct install --config charts/.ci/ct-config.yaml - - # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml - - name: Check if Chart Publish is Needed - id: publish-chart-step - run: | - CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml) - NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2) - RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep 
actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4) - LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1) - echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV - echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV - if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION ]]; then - echo "publish=true" >> $GITHUB_OUTPUT - else - echo "publish=false" >> $GITHUB_OUTPUT - fi - - - name: Job summary - run: | - echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "**Status:**" >> $GITHUB_STEP_SUMMARY - echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY - echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY - echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set up Helm + uses: azure/setup-helm@v3.4 + with: + version: ${{ env.HELM_VERSION }} + + - name: Set up kube-score + run: | + wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score + chmod 755 kube-score + + - name: Kube-score generated manifests + run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem + + # python is a requirement for the chart-testing action below (supports yamllint among other tests) + - uses: actions/setup-python@v4 + with: + python-version: '3.11' + + - name: Set up 
chart-testing + uses: helm/chart-testing-action@v2.3.1 + + - name: Run chart-testing (list-changed) + id: list-changed + run: | + changed=$(ct list-changed --config charts/.ci/ct-config.yaml) + if [[ -n "$changed" ]]; then + echo "::set-output name=changed::true" + fi + + - name: Run chart-testing (lint) + run: | + ct lint --config charts/.ci/ct-config.yaml + + - name: Create kind cluster + if: steps.list-changed.outputs.changed == 'true' + uses: helm/kind-action@v1.4.0 + + # We need cert-manager already installed in the cluster because we assume the CRDs exist + - name: Install cert-manager + if: steps.list-changed.outputs.changed == 'true' + run: | + helm repo add jetstack https://charts.jetstack.io --force-update + helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait + + - name: Run chart-testing (install) + if: steps.list-changed.outputs.changed == 'true' + run: ct install --config charts/.ci/ct-config.yaml + + # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml + - name: Check if Chart Publish is Needed + id: publish-chart-step + run: | + CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml) + NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2) + RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4) + LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1) + + echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV + echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV + + # Always publish if force is true + if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then + echo "publish=true" >> $GITHUB_OUTPUT + else + echo "publish=false" >> $GITHUB_OUTPUT + fi + + - name: Job 
summary + run: | + echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Status:**" >> $GITHUB_STEP_SUMMARY + echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY + echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY + echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY publish-chart: if: needs.lint-chart.outputs.publish-chart == 'true' @@ -121,105 +122,86 @@ jobs: name: Publish Chart runs-on: ubuntu-latest permissions: - contents: write # for helm/chart-releaser-action to push chart release and create a release + contents: write # for helm/chart-releaser-action to push chart release and create a release env: CHART_TARGET_ORG: actions-runner-controller CHART_TARGET_REPO: actions-runner-controller.github.io CHART_TARGET_BRANCH: master - + steps: - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Configure Git - run: | - git config user.name "$GITHUB_ACTOR" - git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - - - name: Get Token - id: get_workflow_token - uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db - with: - application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} - application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} - organization: ${{ env.CHART_TARGET_ORG }} - - - name: Install chart-releaser - uses: helm/chart-releaser-action@v1.4.1 - with: - install_only: true - install_dir: ${{ github.workspace }}/bin - - - name: Package and upload release assets - run: | - cr package \ - ${{ github.workspace }}/charts/actions-runner-controller/ \ - --package-path .cr-release-packages - - cr upload \ - --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ - --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ - --package-path .cr-release-packages \ - --token ${{ secrets.GITHUB_TOKEN }} 
- - - name: Generate updated index.yaml - run: | - cr index \ - --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ - --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ - --index-path ${{ github.workspace }}/index.yaml \ - --pages-branch 'gh-pages' \ - --pages-index-path 'index.yaml' - - # This step is required to not throw away changes made to the index.yaml on every new chart release. - # - # We update the index.yaml in the actions-runner-controller.github.io repo - # by appending the new chart version to the index.yaml saved in actions-runner-controller repo - # and copying and commiting the updated index.yaml to the github.io one. - # See below for more context: - # - https://github.com/actions-runner-controller/actions-runner-controller.github.io/pull/2 - # - https://github.com/actions/actions-runner-controller/pull/2452 - - name: Commit and push to actions/actions-runner-controller - run: | - git stash - git checkout gh-pages - git stash pop - git config user.name "$GITHUB_ACTOR" - git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - git add . - git commit -m "Update index.yaml" - git push - working-directory: ${{ github.workspace }} - - # Chart Release was never intended to publish to a different repo - # this workaround is intended to move the index.yaml to the target repo - # where the github pages are hosted - - name: Checkout target repository - uses: actions/checkout@v3 - with: - repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }} - path: ${{ env.CHART_TARGET_REPO }} - ref: ${{ env.CHART_TARGET_BRANCH }} - token: ${{ steps.get_workflow_token.outputs.token }} - - - name: Copy index.yaml - run: | - cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml - - - name: Commit and push to target repository - run: | - git config user.name "$GITHUB_ACTOR" - git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - git add . 
- git commit -m "Update index.yaml" - git push - working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }} - - - name: Job summary - run: | - echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "**Status:**" >> $GITHUB_STEP_SUMMARY - echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Get Token + id: get_workflow_token + uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db + with: + application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} + application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} + organization: ${{ env.CHART_TARGET_ORG }} + + - name: Install chart-releaser + uses: helm/chart-releaser-action@v1.4.1 + with: + install_only: true + install_dir: ${{ github.workspace }}/bin + + - name: Package and upload release assets + run: | + cr package \ + ${{ github.workspace }}/charts/actions-runner-controller/ \ + --package-path .cr-release-packages + + cr upload \ + --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ + --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ + --package-path .cr-release-packages \ + --token ${{ secrets.GITHUB_TOKEN }} + + - name: Generate updated index.yaml + run: | + cr index \ + --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ + --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ + --index-path ${{ github.workspace }}/index.yaml \ + --push \ + --pages-branch 'gh-pages' \ + --pages-index-path 'index.yaml' + + # Chart Release was never intended to publish to a different repo + # this workaround is intended to move the index.yaml to the target 
repo + # where the github pages are hosted + - name: Checkout target repository + uses: actions/checkout@v3 + with: + repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }} + path: ${{ env.CHART_TARGET_REPO }} + ref: ${{ env.CHART_TARGET_BRANCH }} + token: ${{ steps.get_workflow_token.outputs.token }} + + - name: Copy index.yaml + run: | + cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml + + - name: Commit and push to target repository + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + git add . + git commit -m "Update index.yaml" + git push + working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }} + + - name: Job summary + run: | + echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Status:**" >> $GITHUB_STEP_SUMMARY + echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY From c028449e3b64b9f08f50b48122b90987972cb4cd Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Mon, 10 Apr 2023 08:49:32 +0200 Subject: [PATCH 179/561] Extend manager roles to accept ephemeralrunnerset/finalizers (#2493) --- .../templates/manager_cluster_role.yaml | 7 +++++++ .../templates/manager_single_namespace_watch_role.yaml | 7 +++++++ .../gha-runner-scale-set-controller/tests/template_test.go | 4 ++-- config/rbac/role.yaml | 7 +++++++ .../actions.github.com/ephemeralrunnerset_controller.go | 1 + 5 files changed, 24 insertions(+), 2 deletions(-) diff --git a/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml index 0ee3bb5395..cc58e3c25b 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml +++ 
b/charts/gha-runner-scale-set-controller/templates/manager_cluster_role.yaml @@ -78,6 +78,13 @@ rules: - get - patch - update +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets/finalizers + verbs: + - patch + - update - apiGroups: - actions.github.com resources: diff --git a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml index f195da55cc..f0f653d766 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml @@ -52,6 +52,13 @@ rules: - get - patch - update +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets/finalizers + verbs: + - patch + - update - apiGroups: - actions.github.com resources: diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index 469cdecf44..a097b433ef 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -169,7 +169,7 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) { assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace") assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name) - assert.Equal(t, 15, len(managerClusterRole.Rules)) + assert.Equal(t, 16, len(managerClusterRole.Rules)) _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"}) assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role.yaml in chart", "We should get an error because the template should be skipped") @@ -843,7 +843,7 @@ func 
TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) { assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name) assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace) - assert.Equal(t, 13, len(managerSingleNamespaceWatchRole.Rules)) + assert.Equal(t, 14, len(managerSingleNamespaceWatchRole.Rules)) } func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) { diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index b80a869de4..ee70bd3847 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -102,6 +102,13 @@ rules: - patch - update - watch +- apiGroups: + - actions.github.com + resources: + - ephemeralrunnersets/finalizers + verbs: + - patch + - update - apiGroups: - actions.github.com resources: diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index 6a90ec78af..f4850cf5e3 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -56,6 +56,7 @@ type EphemeralRunnerSetReconciler struct { //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/finalizers,verbs=update;patch //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get From ba2c48c73178d3cbdc2b3956b041ce6f14b4731f Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Mon, 10 Apr 2023 21:03:02 +0200 Subject: [PATCH 180/561] Fix e2e tests infinite looping when waiting for resources (#2496) Co-authored-by: Tingluo 
Huang --- .github/workflows/e2e-test-linux-vm.yaml | 54 +++++++++++++++++------- 1 file changed, 38 insertions(+), 16 deletions(-) diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index b7d5cf1349..6dd39fb4c4 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -5,7 +5,7 @@ on: branches: - master pull_request: - branches: + branches: - master workflow_dispatch: @@ -21,6 +21,7 @@ env: jobs: default-setup: runs-on: ubuntu-latest + timeout-minutes: 20 if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: "arc-test-workflow.yaml" @@ -55,11 +56,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl get pod -n arc-systems @@ -84,11 +86,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems @@ -107,6 +110,7 @@ jobs: single-namespace-setup: runs-on: ubuntu-latest + timeout-minutes: 20 if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: "arc-test-workflow.yaml" @@ -143,11 +147,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for controller pod with label 
app.kubernetes.io/name=gha-runner-scale-set-controller" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl get pod -n arc-systems @@ -172,11 +177,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems @@ -195,6 +201,7 @@ jobs: dind-mode-setup: runs-on: ubuntu-latest + timeout-minutes: 20 if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: arc-test-dind-workflow.yaml @@ -229,11 +236,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl get pod -n arc-systems @@ -259,11 +267,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems @@ -282,6 +291,7 @@ jobs: kubernetes-mode-setup: runs-on: ubuntu-latest + timeout-minutes: 20 if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: 
"arc-test-kubernetes-workflow.yaml" @@ -321,11 +331,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl get pod -n arc-systems @@ -355,11 +366,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems @@ -378,6 +390,7 @@ jobs: auth-proxy-setup: runs-on: ubuntu-latest + timeout-minutes: 20 if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: "arc-test-workflow.yaml" @@ -412,11 +425,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl get pod -n arc-systems @@ -453,11 +467,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems @@ -476,6 +491,7 @@ jobs: 
anonymous-proxy-setup: runs-on: ubuntu-latest + timeout-minutes: 20 if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: "arc-test-workflow.yaml" @@ -510,11 +526,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl get pod -n arc-systems @@ -545,11 +562,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems @@ -568,6 +586,7 @@ jobs: self-signed-ca-setup: runs-on: ubuntu-latest + timeout-minutes: 20 if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id env: WORKFLOW_FILE: "arc-test-workflow.yaml" @@ -602,11 +621,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl get pod -n arc-systems @@ -629,11 +649,12 @@ jobs: cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for mitmproxy generate its CA cert" exit 1 fi sleep 1 + 
count=$((count+1)) done sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt @@ -661,11 +682,12 @@ jobs: echo "Pod found: $POD_NAME" break fi - if [ "$count" -ge 10 ]; then + if [ "$count" -ge 60 ]; then echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" exit 1 fi sleep 1 + count=$((count+1)) done kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl get pod -n arc-systems From 6175fba0d0866436767c686de97db0287fbf595a Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Tue, 11 Apr 2023 16:25:43 +0200 Subject: [PATCH 181/561] Update limit manager role permissions ADR (#2500) Co-authored-by: Tingluo Huang --- .../2022-12-05-adding-labels-k8s-resources.md | 2 +- ...023-02-10-limit-manager-role-permission.md | 4 +- ...2023-03-14-adding-labels-k8s-resources.md} | 0 ...023-04-11-limit-manager-role-permission.md | 167 ++++++++++++++++++ 4 files changed, 171 insertions(+), 2 deletions(-) rename docs/adrs/{2023-04-14-adding-labels-k8s-resources.md => 2023-03-14-adding-labels-k8s-resources.md} (100%) create mode 100644 docs/adrs/2023-04-11-limit-manager-role-permission.md diff --git a/docs/adrs/2022-12-05-adding-labels-k8s-resources.md b/docs/adrs/2022-12-05-adding-labels-k8s-resources.md index 859e141496..2a2023ad89 100644 --- a/docs/adrs/2022-12-05-adding-labels-k8s-resources.md +++ b/docs/adrs/2022-12-05-adding-labels-k8s-resources.md @@ -86,4 +86,4 @@ Or for example if they're having problems specifically with runners: This way users don't have to understand ARC moving parts but we still have a way to target them specifically if we need to. 
-[^1]: Superseded by [ADR 2023-04-14](2023-04-14-adding-labels-k8s-resources.md) +[^1]: Superseded by [ADR 2023-03-14](2023-03-14-adding-labels-k8s-resources.md) diff --git a/docs/adrs/2023-02-10-limit-manager-role-permission.md b/docs/adrs/2023-02-10-limit-manager-role-permission.md index d327b4f874..eb3d453c0b 100644 --- a/docs/adrs/2023-02-10-limit-manager-role-permission.md +++ b/docs/adrs/2023-02-10-limit-manager-role-permission.md @@ -2,7 +2,7 @@ **Date**: 2023-02-10 -**Status**: Done +**Status**: Superceded [^1] ## Context @@ -136,3 +136,5 @@ The downside of this mode: - When you have multiple controllers deployed, they will still use the same version of the CRD. So you will need to make sure every controller you deployed has to be the same version as each other. - You can't mismatch install both `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster. + +[^1]: Superseded by [ADR 2023-04-11](2023-04-11-limit-manager-role-permission.md) diff --git a/docs/adrs/2023-04-14-adding-labels-k8s-resources.md b/docs/adrs/2023-03-14-adding-labels-k8s-resources.md similarity index 100% rename from docs/adrs/2023-04-14-adding-labels-k8s-resources.md rename to docs/adrs/2023-03-14-adding-labels-k8s-resources.md diff --git a/docs/adrs/2023-04-11-limit-manager-role-permission.md b/docs/adrs/2023-04-11-limit-manager-role-permission.md new file mode 100644 index 0000000000..3ed23dfd06 --- /dev/null +++ b/docs/adrs/2023-04-11-limit-manager-role-permission.md @@ -0,0 +1,167 @@ +# ADR 2023-04-11: Limit Permissions for Service Accounts in Actions-Runner-Controller + +**Date**: 2023-04-11 + +**Status**: Done [^1] + +## Context + +- `actions-runner-controller` is a Kubernetes CRD (with controller) built using https://github.com/kubernetes-sigs/controller-runtime + +- [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) has a default cache based k8s API client.Reader to make 
query k8s API server more efficiency. + +- The cache-based API client requires cluster scope `list` and `watch` permission for any resource the controller may query. + +- This documentation only scopes to the AutoscalingRunnerSet CRD and its controller. + +## Service accounts and their role binding in actions-runner-controller + +There are 3 service accounts involved for a working `AutoscalingRunnerSet` based `actions-runner-controller` + +1. Service account for each Ephemeral runner Pod + +This should have the lowest privilege (not any `RoleBinding` nor `ClusterRoleBinding`) by default, in the case of `containerMode=kubernetes`, it will get certain write permission with `RoleBinding` to limit the permission to a single namespace. + +> References: +> +> - ./charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml +> - ./charts/gha-runner-scale-set/templates/kube_mode_role.yaml +> - ./charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml +> - ./charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml + +2. Service account for AutoScalingListener Pod + +This has a `RoleBinding` to a single namespace with a `Role` that has permission to `PATCH` `EphemeralRunnerSet` and `EphemeralRunner`. + +3. Service account for the controller manager + +Since the CRD controller is a singleton installed in the cluster that manages the CRD across multiple namespaces by default, the service account of the controller manager pod has a `ClusterRoleBinding` to a `ClusterRole` with broader permissions. 
+ +The current `ClusterRole` has the following permissions: + +- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingRunnerSets` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingListeners` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunnerSets` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunners` (with `Status` and `Finalizer` sub-resource) + +- Get/List/Create/Delete/Update/Patch/Watch on `Pods` (with `Status` sub-resource) +- **Get/List/Create/Delete/Update/Patch/Watch on `Secrets`** +- Get/List/Create/Delete/Update/Patch/Watch on `Roles` +- Get/List/Create/Delete/Update/Patch/Watch on `RoleBindings` +- Get/List/Create/Delete/Update/Patch/Watch on `ServiceAccounts` + +> Full list can be found at: https://github.com/actions/actions-runner-controller/blob/facae69e0b189d3b5dd659f36df8a829516d2896/charts/actions-runner-controller-2/templates/manager_role.yaml + +## Limit cluster role permission on Secrets + +The cluster scope `List` `Secrets` permission might be a blocker for adopting `actions-runner-controller` for certain customers as they may have certain restriction in their cluster that simply doesn't allow any service account to have cluster scope `List Secrets` permission. 
+ +To help these customers and improve security for `actions-runner-controller` in general, we will try to limit the `ClusterRole` permission of the controller manager's service account down to the following: + +- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingRunnerSets` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingListeners` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunnerSets` (with `Status` and `Finalizer` sub-resource) +- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunners` (with `Status` and `Finalizer` sub-resource) + +- List/Watch on `Pods` +- List/Watch/Patch on `Roles` +- List/Watch on `RoleBindings` +- List/Watch on `ServiceAccounts` + +> We will change the default cache-based client to bypass cache on reading `Secrets` and `ConfigMaps`(ConfigMap is used when you configure `githubServerTLS`), so we can eliminate the need for `List` and `Watch` `Secrets` permission in cluster scope. + +Introduce a new `Role` for the controller and `RoleBinding` the `Role` with the controller's `ServiceAccount` in the namespace the controller is deployed. This role will grant the controller's service account required permission to work with `AutoScalingListeners` in the controller namespace. + +- Get/Create/Delete on `Pods` +- Get on `Pods/status` +- Get/Create/Delete/Update/Patch on `Secrets` +- Get/Create/Delete/Update/Patch on `ServiceAccounts` + +The `Role` and `RoleBinding` creation will happen during the `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller` + +During `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`, we will store the controller's service account info as labels on the controller `Deployment`. 
+Ex: + +```yaml +actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }} +actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }} +``` + +Introduce a new `Role` per `AutoScalingRunnerSet` installation and `RoleBinding` the `Role` with the controller's `ServiceAccount` in the namespace that each `AutoScalingRunnerSet` deployed with the following permission. + +- Get/Create/Delete/Update/Patch/List on `Secrets` +- Create/Delete on `Pods` +- Get on `Pods/status` +- Get/Create/Delete/Update/Patch on `Roles` +- Get/Create/Delete/Update/Patch on `RoleBindings` +- Get on `ConfigMaps` + +The `Role` and `RoleBinding` creation will happen during `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set` to grant the controller's service account required permissions to operate in the namespace the `AutoScalingRunnerSet` deployed. + +The `gha-runner-scale-set` helm chart will try to find the `Deployment` of the controller using `helm lookup`, and get the service account info from the labels of the controller `Deployment` (`actions.github.com/controller-service-account-namespace` and `actions.github.com/controller-service-account-name`). + +The `gha-runner-scale-set` helm chart will use this service account to properly render the `RoleBinding` template. + +The `gha-runner-scale-set` helm chart will also allow customers to explicitly provide the controller service account info, in case the `helm lookup` couldn't locate the right controller `Deployment`. + +New sections in `values.yaml` of `gha-runner-scale-set`: + +```yaml +## Optional controller service account that needs to have required Role and RoleBinding +## to operate this gha-runner-scale-set installation. +## The helm chart will try to find the controller deployment and its service account at installation time. 
+## In case the helm chart can't find the right service account, you can explicitly pass in the following value +## to help it finish RoleBinding with the right service account. +## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly. +controllerServiceAccount: + namespace: arc-system + name: test-arc-gha-runner-scale-set-controller +``` + +## Install ARC to only watch/react resources in a single namespace + +In case the user doesn't want to have any `ClusterRole`, they can choose to install the `actions-runner-controller` in a mode that only requires a `Role` with `RoleBinding` in a particular namespace. + +In this mode, the `actions-runner-controller` will only be able to watch the `AutoScalingRunnerSet` resource in a single namespace. + +If you want to deploy multiple `AutoScalingRunnerSet` into different namespaces, you will need to install `actions-runner-controller` in this mode multiple times as well and have each installation watch the namespace you want to deploy an `AutoScalingRunnerSet` + +You will install `actions-runner-controller` with something like `helm install arc --namespace arc-system --set watchSingleNamespace=test-namespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller` (the `test-namespace` namespace needs to be created first). + +You will deploy the `AutoScalingRunnerSet` with something like `helm install demo --namespace TestNamespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set` + +In this mode, you will end up with a manager `Role` that has all Get/List/Create/Delete/Update/Patch/Watch permissions on resources we need, and a `RoleBinding` to bind the `Role` with the controller `ServiceAccount` in the watched single namespace and the controller namespace, ex: `test-namespace` and `arc-system` in the above example. 
+ +The downside of this mode: + +- When you have multiple controllers deployed, they will still use the same version of the CRD. So you will need to make sure every controller you deployed has to be the same version as each other. +- You can't mismatch install both `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster. + +## Cleanup process + +We will apply following annotations during the installation that are going to be used in the cleanup process (`helm uninstall`). If annotation is not present, cleanup of that resource is going to be skipped. + +The cleanup only patches the resource removing the `actions.github.com/cleanup-protection` finalizer. The client that created a resource is responsible for deleting them. Keep in mind, `helm uninstall` will automatically delete resources, causing the cleanup procedure to be complete. + +Annotations applied to the `AutoscalingRunnerSet` used in the cleanup procedure +are: + +- `actions.github.com/cleanup-github-secret-name` +- `actions.github.com/cleanup-manager-role-binding` +- `actions.github.com/cleanup-manager-role-name` +- `actions.github.com/cleanup-kubernetes-mode-role-binding-name` +- `actions.github.com/cleanup-kubernetes-mode-role-name` +- `actions.github.com/cleanup-kubernetes-mode-service-account-name` +- `actions.github.com/cleanup-no-permission-service-account-name` + +The order in which resources are being patched to remove finalizers: + +1. Kubernetes mode `RoleBinding` +1. Kubernetes mode `Role` +1. Kubernetes mode `ServiceAccount` +1. No permission `ServiceAccount` +1. GitHub `Secret` +1. Manager `RoleBinding` +1. 
Manager `Role` + +[^1]: Supersedes [ADR 2023-02-10](2023-02-10-limit-manager-role-permission.md) From 896ab38cb25957a2c360303913b68ee80bd746e3 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Wed, 12 Apr 2023 09:50:23 +0200 Subject: [PATCH 182/561] Reordering methods and constants so it is easier to look it up (#2501) --- .../autoscalinglistener_controller.go | 119 +++++----- .../autoscalinglistener_controller_test.go | 4 +- .../autoscalingrunnerset_controller.go | 49 ++-- .../autoscalingrunnerset_controller_test.go | 18 +- controllers/actions.github.com/constants.go | 46 ++++ .../ephemeralrunnerset_controller.go | 9 +- .../actions.github.com/resourcebuilder.go | 210 ++++++++---------- .../resourcebuilder_test.go | 4 +- 8 files changed, 232 insertions(+), 227 deletions(-) diff --git a/controllers/actions.github.com/autoscalinglistener_controller.go b/controllers/actions.github.com/autoscalinglistener_controller.go index 5509946c85..51525a83ab 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller.go +++ b/controllers/actions.github.com/autoscalinglistener_controller.go @@ -41,7 +41,6 @@ import ( const ( autoscalingListenerContainerName = "autoscaler" - autoscalingListenerOwnerKey = ".metadata.controller" autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer" ) @@ -246,65 +245,6 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl. return ctrl.Result{}, nil } -// SetupWithManager sets up the controller with the Manager. 
-func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error { - groupVersionIndexer := func(rawObj client.Object) []string { - groupVersion := v1alpha1.GroupVersion.String() - owner := metav1.GetControllerOf(rawObj) - if owner == nil { - return nil - } - - // ...make sure it is owned by this controller - if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" { - return nil - } - - // ...and if so, return it - return []string{owner.Name} - } - - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil { - return err - } - - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil { - return err - } - - labelBasedWatchFunc := func(obj client.Object) []reconcile.Request { - var requests []reconcile.Request - labels := obj.GetLabels() - namespace, ok := labels["auto-scaling-listener-namespace"] - if !ok { - return nil - } - - name, ok := labels["auto-scaling-listener-name"] - if !ok { - return nil - } - requests = append(requests, - reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - }, - ) - return requests - } - - return ctrl.NewControllerManagedBy(mgr). - For(&v1alpha1.AutoscalingListener{}). - Owns(&corev1.Pod{}). - Owns(&corev1.ServiceAccount{}). - Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)). - Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)). - WithEventFilter(predicate.ResourceVersionChangedPredicate{}). 
- Complete(r) -} - func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) { logger.Info("Cleaning up the listener pod") listenerPod := new(corev1.Pod) @@ -615,3 +555,62 @@ func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context "serviceAccount", serviceAccount.Name) return ctrl.Result{Requeue: true}, nil } + +// SetupWithManager sets up the controller with the Manager. +func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error { + groupVersionIndexer := func(rawObj client.Object) []string { + groupVersion := v1alpha1.GroupVersion.String() + owner := metav1.GetControllerOf(rawObj) + if owner == nil { + return nil + } + + // ...make sure it is owned by this controller + if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" { + return nil + } + + // ...and if so, return it + return []string{owner.Name} + } + + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil { + return err + } + + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil { + return err + } + + labelBasedWatchFunc := func(obj client.Object) []reconcile.Request { + var requests []reconcile.Request + labels := obj.GetLabels() + namespace, ok := labels["auto-scaling-listener-namespace"] + if !ok { + return nil + } + + name, ok := labels["auto-scaling-listener-name"] + if !ok { + return nil + } + requests = append(requests, + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: name, + Namespace: namespace, + }, + }, + ) + return requests + } + + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.AutoscalingListener{}). + Owns(&corev1.Pod{}). + Owns(&corev1.ServiceAccount{}). 
+ Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)). + Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)). + WithEventFilter(predicate.ResourceVersionChangedPredicate{}). + Complete(r) +} diff --git a/controllers/actions.github.com/autoscalinglistener_controller_test.go b/controllers/actions.github.com/autoscalinglistener_controller_test.go index d493279761..882f4b231a 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller_test.go +++ b/controllers/actions.github.com/autoscalinglistener_controller_test.go @@ -213,7 +213,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { Eventually( func() error { podList := new(corev1.PodList) - err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name}) + err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name}) if err != nil { return err } @@ -231,7 +231,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { Eventually( func() error { serviceAccountList := new(corev1.ServiceAccountList) - err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name}) + err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name}) if err != nil { return err } diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 903acb06e3..16c20442b8 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ 
b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -42,13 +42,10 @@ import ( ) const ( - // TODO: Replace with shared image. - autoscalingRunnerSetOwnerKey = ".metadata.controller" - LabelKeyRunnerSpecHash = "runner-spec-hash" - autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" - runnerScaleSetIdAnnotationKey = "runner-scale-set-id" - runnerScaleSetNameAnnotationKey = "runner-scale-set-name" - autoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection" + labelKeyRunnerSpecHash = "runner-spec-hash" + autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" + runnerScaleSetIdAnnotationKey = "runner-scale-set-id" + runnerScaleSetNameAnnotationKey = "runner-scale-set-name" ) // AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object @@ -201,10 +198,10 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() for _, runnerSet := range existingRunnerSets.all() { - log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[LabelKeyRunnerSpecHash]) + log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash]) } - if desiredSpecHash != latestRunnerSet.Labels[LabelKeyRunnerSpecHash] { + if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] { log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set") return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log) } @@ -232,7 +229,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl } // Our listener pod is out of date, so we need to delete it to get a new recreate. 
- if listener.Labels[LabelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() { + if listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() { log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name) if err := r.Delete(ctx, listener); err != nil { if kerrors.IsNotFound(err) { @@ -601,7 +598,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) { list := new(v1alpha1.EphemeralRunnerSetList) - if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingRunnerSet.Name}); err != nil { + if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil { return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err) } @@ -694,7 +691,7 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro return []string{owner.Name} } - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, autoscalingRunnerSetOwnerKey, groupVersionIndexer); err != nil { + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil { return err } @@ -754,12 +751,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding) switch { case err == nil: - if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) { + if !controllerutil.ContainsFinalizer(roleBinding, 
AutoscalingRunnerSetCleanupFinalizerName) { c.logger.Info("Kubernetes mode role binding finalizer has already been removed", "name", roleBindingName) return } err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) { - controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName) }) if err != nil { c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err) @@ -797,12 +794,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role) switch { case err == nil: - if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) { + if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) { c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName) return } err = patch(ctx, c.client, role, func(obj *rbacv1.Role) { - controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName) }) if err != nil { c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err) @@ -841,12 +838,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount) switch { case err == nil: - if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) { + if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) { c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName) return } err = patch(ctx, c.client, serviceAccount, func(obj 
*corev1.ServiceAccount) { - controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName) }) if err != nil { c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err) @@ -885,12 +882,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount) switch { case err == nil: - if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) { + if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) { c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName) return } err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) { - controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName) }) if err != nil { c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err) @@ -929,12 +926,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret) switch { case err == nil: - if !controllerutil.ContainsFinalizer(githubSecret, autoscalingRunnerSetCleanupFinalizerName) { + if !controllerutil.ContainsFinalizer(githubSecret, AutoscalingRunnerSetCleanupFinalizerName) { c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName) return } err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) { - controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + controllerutil.RemoveFinalizer(obj, 
AutoscalingRunnerSetCleanupFinalizerName) }) if err != nil { c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err) @@ -973,12 +970,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding) switch { case err == nil: - if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) { + if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) { c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName) return } err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) { - controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName) }) if err != nil { c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err) @@ -1017,12 +1014,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinali err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role) switch { case err == nil: - if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) { + if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) { c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName) return } err = patch(ctx, c.client, role, func(obj *rbacv1.Role) { - controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) + controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName) }) if err != nil { c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err) diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go 
b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index 6ad2f18a1e..a53732e392 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -280,10 +280,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items)) } - return runnerSetList.Items[0].Labels[LabelKeyRunnerSpecHash], nil + return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil }, autoscalingRunnerSetTestTimeout, - autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created") + autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created") // We should create a new listener Eventually( @@ -1160,7 +1160,7 @@ var _ = Describe("Test external permissions cleanup", func() { ObjectMeta: metav1.ObjectMeta{ Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName], Namespace: autoscalingRunnerSet.Namespace, - Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName}, }, } @@ -1171,7 +1171,7 @@ var _ = Describe("Test external permissions cleanup", func() { ObjectMeta: metav1.ObjectMeta{ Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName], Namespace: autoscalingRunnerSet.Namespace, - Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName}, }, } @@ -1182,7 +1182,7 @@ var _ = Describe("Test external permissions cleanup", func() { ObjectMeta: metav1.ObjectMeta{ Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName], Namespace: autoscalingRunnerSet.Namespace, - Finalizers: 
[]string{autoscalingRunnerSetCleanupFinalizerName}, + Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName}, }, Subjects: []rbacv1.Subject{ { @@ -1317,7 +1317,7 @@ var _ = Describe("Test external permissions cleanup", func() { ObjectMeta: metav1.ObjectMeta{ Name: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName], Namespace: autoscalingRunnerSet.Namespace, - Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName}, }, Data: map[string][]byte{ "github_token": []byte(defaultGitHubToken), @@ -1333,7 +1333,7 @@ var _ = Describe("Test external permissions cleanup", func() { ObjectMeta: metav1.ObjectMeta{ Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName], Namespace: autoscalingRunnerSet.Namespace, - Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName}, }, } @@ -1344,7 +1344,7 @@ var _ = Describe("Test external permissions cleanup", func() { ObjectMeta: metav1.ObjectMeta{ Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName], Namespace: autoscalingRunnerSet.Namespace, - Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName}, }, RoleRef: rbacv1.RoleRef{ APIGroup: rbacv1.GroupName, @@ -1360,7 +1360,7 @@ var _ = Describe("Test external permissions cleanup", func() { ObjectMeta: metav1.ObjectMeta{ Name: autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName], Namespace: autoscalingRunnerSet.Namespace, - Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, + Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName}, }, } diff --git a/controllers/actions.github.com/constants.go b/controllers/actions.github.com/constants.go index 613db79c07..339c39d9e0 100644 --- a/controllers/actions.github.com/constants.go +++ b/controllers/actions.github.com/constants.go @@ 
-1,5 +1,7 @@ package actionsgithubcom +import corev1 "k8s.io/api/core/v1" + const ( LabelKeyRunnerTemplateHash = "runner-template-hash" LabelKeyPodTemplateHash = "pod-template-hash" @@ -16,3 +18,47 @@ const ( EnvVarHTTPSProxy = "https_proxy" EnvVarNoProxy = "no_proxy" ) + +// Labels applied to resources +const ( + // Kubernetes labels + LabelKeyKubernetesPartOf = "app.kubernetes.io/part-of" + LabelKeyKubernetesComponent = "app.kubernetes.io/component" + LabelKeyKubernetesVersion = "app.kubernetes.io/version" + + // Github labels + LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name" + LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace" + LabelKeyGitHubEnterprise = "actions.github.com/enterprise" + LabelKeyGitHubOrganization = "actions.github.com/organization" + LabelKeyGitHubRepository = "actions.github.com/repository" +) + +// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running +const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection" + +const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name" + +// Labels applied to listener roles +const ( + labelKeyListenerName = "auto-scaling-listener-name" + labelKeyListenerNamespace = "auto-scaling-listener-namespace" +) + +// Annotations applied for later cleanup of resources +const ( + AnnotationKeyManagerRoleBindingName = "actions.github.com/cleanup-manager-role-binding" + AnnotationKeyManagerRoleName = "actions.github.com/cleanup-manager-role-name" + AnnotationKeyKubernetesModeRoleName = "actions.github.com/cleanup-kubernetes-mode-role-name" + AnnotationKeyKubernetesModeRoleBindingName = "actions.github.com/cleanup-kubernetes-mode-role-binding-name" + AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name" + AnnotationKeyGitHubSecretName = "actions.github.com/cleanup-github-secret-name" + AnnotationKeyNoPermissionServiceAccountName = 
"actions.github.com/cleanup-no-permission-service-account-name" +) + +// DefaultScaleSetListenerImagePullPolicy is the default pull policy applied +// to the listener when ImagePullPolicy is not specified +const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent + +// ownerKey is field selector matching the owner name of a particular resource +const resourceOwnerKey = ".metadata.controller" diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index f4850cf5e3..dcc6a525aa 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -40,8 +40,7 @@ import ( ) const ( - ephemeralRunnerSetReconcilerOwnerKey = ".metadata.controller" - ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer" + ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer" ) // EphemeralRunnerSetReconciler reconciles a EphemeralRunnerSet object @@ -147,7 +146,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R ctx, ephemeralRunnerList, client.InNamespace(req.Namespace), - client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: req.Name}, + client.MatchingFields{resourceOwnerKey: req.Name}, ) if err != nil { log.Error(err, "Unable to list child ephemeral runners") @@ -243,7 +242,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) { ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList) - err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name}) + err := r.List(ctx, ephemeralRunnerList, 
client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name}) if err != nil { return false, fmt.Errorf("failed to list child ephemeral runners: %v", err) } @@ -522,7 +521,7 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte // SetupWithManager sets up the controller with the Manager. func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { // Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups. - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, ephemeralRunnerSetReconcilerOwnerKey, func(rawObj client.Object) []string { + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string { groupVersion := v1alpha1.GroupVersion.String() // grab the job object, extract the owner... diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index 2ddba11b13..4a5bccc46b 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -20,40 +20,6 @@ const ( jitTokenKey = "jitToken" ) -// Labels applied to resources -const ( - // Kubernetes labels - LabelKeyKubernetesPartOf = "app.kubernetes.io/part-of" - LabelKeyKubernetesComponent = "app.kubernetes.io/component" - LabelKeyKubernetesVersion = "app.kubernetes.io/version" - - // Github labels - LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name" - LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace" - LabelKeyGitHubEnterprise = "actions.github.com/enterprise" - LabelKeyGitHubOrganization = "actions.github.com/organization" - LabelKeyGitHubRepository = "actions.github.com/repository" -) - -const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name" - -// Labels applied to listener roles 
-const ( - labelKeyListenerName = "auto-scaling-listener-name" - labelKeyListenerNamespace = "auto-scaling-listener-namespace" -) - -// Annotations applied for later cleanup of resources -const ( - AnnotationKeyManagerRoleBindingName = "actions.github.com/cleanup-manager-role-binding" - AnnotationKeyManagerRoleName = "actions.github.com/cleanup-manager-role-name" - AnnotationKeyKubernetesModeRoleName = "actions.github.com/cleanup-kubernetes-mode-role-name" - AnnotationKeyKubernetesModeRoleBindingName = "actions.github.com/cleanup-kubernetes-mode-role-binding-name" - AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name" - AnnotationKeyGitHubSecretName = "actions.github.com/cleanup-github-secret-name" - AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name" -) - var commonLabelKeys = [...]string{ LabelKeyKubernetesPartOf, LabelKeyKubernetesComponent, @@ -67,8 +33,6 @@ var commonLabelKeys = [...]string{ const labelValueKubernetesPartOf = "gha-runner-scale-set" -const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent - // scaleSetListenerImagePullPolicy is applied to all listeners var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy @@ -84,6 +48,62 @@ func SetListenerImagePullPolicy(pullPolicy string) bool { type resourceBuilder struct{} +func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) { + runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) + if err != nil { + return nil, err + } + + effectiveMinRunners := 0 + effectiveMaxRunners := math.MaxInt32 + if autoscalingRunnerSet.Spec.MaxRunners != nil { + effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners 
+ } + if autoscalingRunnerSet.Spec.MinRunners != nil { + effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners + } + + githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl) + if err != nil { + return nil, fmt.Errorf("failed to parse github config from url: %v", err) + } + + autoscalingListener := &v1alpha1.AutoscalingListener{ + ObjectMeta: metav1.ObjectMeta{ + Name: scaleSetListenerName(autoscalingRunnerSet), + Namespace: namespace, + Labels: map[string]string{ + LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, + LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name, + LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, + LabelKeyKubernetesComponent: "runner-scale-set-listener", + LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], + LabelKeyGitHubEnterprise: githubConfig.Enterprise, + LabelKeyGitHubOrganization: githubConfig.Organization, + LabelKeyGitHubRepository: githubConfig.Repository, + labelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), + }, + }, + Spec: v1alpha1.AutoscalingListenerSpec{ + GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl, + GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret, + RunnerScaleSetId: runnerScaleSetId, + AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace, + AutoscalingRunnerSetName: autoscalingRunnerSet.Name, + EphemeralRunnerSetName: ephemeralRunnerSet.Name, + MinRunners: effectiveMinRunners, + MaxRunners: effectiveMaxRunners, + Image: image, + ImagePullPolicy: scaleSetListenerImagePullPolicy, + ImagePullSecrets: imagePullSecrets, + Proxy: autoscalingRunnerSet.Spec.Proxy, + GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS, + }, + } + + return autoscalingListener, nil +} + func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod { 
listenerEnv := []corev1.EnvVar{ { @@ -207,54 +227,6 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A return newRunnerScaleSetListenerPod } -func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) { - runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) - if err != nil { - return nil, err - } - runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() - - newLabels := map[string]string{ - LabelKeyRunnerSpecHash: runnerSpecHash, - LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, - LabelKeyKubernetesComponent: "runner-set", - LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], - LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name, - LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, - } - - if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil { - return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err) - } - - newAnnotations := map[string]string{ - AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], - } - - newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-", - Namespace: autoscalingRunnerSet.ObjectMeta.Namespace, - Labels: newLabels, - Annotations: newAnnotations, - }, - Spec: v1alpha1.EphemeralRunnerSetSpec{ - Replicas: 0, - EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ - RunnerScaleSetId: runnerScaleSetId, - GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl, - GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret, - Proxy: autoscalingRunnerSet.Spec.Proxy, - GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS, - PodTemplateSpec: autoscalingRunnerSet.Spec.Template, - }, - }, - } - - 
return newEphemeralRunnerSet, nil -} - func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -344,60 +316,52 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v return newListenerSecret } -func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) { +func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) { runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) if err != nil { return nil, err } + runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() - effectiveMinRunners := 0 - effectiveMaxRunners := math.MaxInt32 - if autoscalingRunnerSet.Spec.MaxRunners != nil { - effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners + newLabels := map[string]string{ + labelKeyRunnerSpecHash: runnerSpecHash, + LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, + LabelKeyKubernetesComponent: "runner-set", + LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], + LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name, + LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, } - if autoscalingRunnerSet.Spec.MinRunners != nil { - effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners + + if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil { + return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err) } - githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl) - if err != nil { - return nil, fmt.Errorf("failed to parse github 
config from url: %v", err) + newAnnotations := map[string]string{ + AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], } - autoscalingListener := &v1alpha1.AutoscalingListener{ + newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{ + TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ - Name: scaleSetListenerName(autoscalingRunnerSet), - Namespace: namespace, - Labels: map[string]string{ - LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, - LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name, - LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, - LabelKeyKubernetesComponent: "runner-scale-set-listener", - LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], - LabelKeyGitHubEnterprise: githubConfig.Enterprise, - LabelKeyGitHubOrganization: githubConfig.Organization, - LabelKeyGitHubRepository: githubConfig.Repository, - LabelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), - }, + GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-", + Namespace: autoscalingRunnerSet.ObjectMeta.Namespace, + Labels: newLabels, + Annotations: newAnnotations, }, - Spec: v1alpha1.AutoscalingListenerSpec{ - GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl, - GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret, - RunnerScaleSetId: runnerScaleSetId, - AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace, - AutoscalingRunnerSetName: autoscalingRunnerSet.Name, - EphemeralRunnerSetName: ephemeralRunnerSet.Name, - MinRunners: effectiveMinRunners, - MaxRunners: effectiveMaxRunners, - Image: image, - ImagePullPolicy: scaleSetListenerImagePullPolicy, - ImagePullSecrets: imagePullSecrets, - Proxy: autoscalingRunnerSet.Spec.Proxy, - GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS, + Spec: v1alpha1.EphemeralRunnerSetSpec{ + Replicas: 0, + EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ + RunnerScaleSetId: runnerScaleSetId, + 
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl, + GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret, + Proxy: autoscalingRunnerSet.Spec.Proxy, + GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS, + PodTemplateSpec: autoscalingRunnerSet.Spec.Template, + }, }, } - return autoscalingListener, nil + return newEphemeralRunnerSet, nil } func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner { diff --git a/controllers/actions.github.com/resourcebuilder_test.go b/controllers/actions.github.com/resourcebuilder_test.go index e41d798006..925ba5cf43 100644 --- a/controllers/actions.github.com/resourcebuilder_test.go +++ b/controllers/actions.github.com/resourcebuilder_test.go @@ -36,7 +36,7 @@ func TestLabelPropagation(t *testing.T) { assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf]) assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent]) assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion]) - assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash]) + assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash]) assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName]) assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace]) assert.Equal(t, "", ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise]) @@ -49,7 +49,7 @@ func TestLabelPropagation(t *testing.T) { assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf]) assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent]) assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion]) - assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash]) + 
assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash]) assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName]) assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace]) assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise]) From fdd670cde0e728029c92c927cb5084199fb3faad Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Mon, 17 Apr 2023 21:30:41 +0900 Subject: [PATCH 183/561] Fix docker.sock permission error for non-dind Ubuntu 20.04 runners since v0.27.2 (#2499) #2490 has been happening since v0.27.2 for non-dind runners based on Ubuntu 20.04 runner images. It does not affect Ubuntu 22.04 non-dind runners(i.e. runners with dockerd sidecars) and Ubuntu 20.04/22.04 dind runners(i.e. runners without dockerd sidecars). However, presuming many folks are still using Ubuntu 20.04 runners and non-dind runners, we should fix it. This change tries to fix it by defaulting to the docker group id 1001 used by Ubuntu 20.04 runners, and use gid 121 for Ubuntu 22.04 runners. We use the image tag to see which Ubuntu version the runner is based on. The algorithm is so simple- we assume it's Ubuntu-22.04-based if the image tag contains "22.04". This might be a breaking change for folks who have already upgraded to Ubuntu 22.04 runners using their own custom runner images. Note again; we rely on the image tag to detect Ubuntu 22.04 runner images and use the proper docker gid- Folks using our official Ubuntu 22.04 runner images are not affected. It is a breaking change anyway, so I have added a remedy- ARC got a new flag, `--docker-gid`, which defaults to `1001` but can be set to `121` or whatever gid the operator/admin likes. This can be set to `--docker-gid=121`, for example, if you are using your own custom runner image based on Ubuntu 22.04 and the image tag does not contain "22.04". 
Fixes #2490 --- .../integration_test.go | 6 +- .../new_runner_pod_test.go | 88 +++++++++++++++++-- .../runner_controller.go | 55 ++++++++---- .../runnerset_controller.go | 13 ++- main.go | 54 +++++------- 5 files changed, 150 insertions(+), 66 deletions(-) diff --git a/controllers/actions.summerwind.net/integration_test.go b/controllers/actions.summerwind.net/integration_test.go index d10184eb84..2dc34ca7b4 100644 --- a/controllers/actions.summerwind.net/integration_test.go +++ b/controllers/actions.summerwind.net/integration_test.go @@ -105,12 +105,14 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment { Log: logf.Log, Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"), GitHubClient: multiClient, - RunnerImage: "example/runner:test", - DockerImage: "example/docker:test", Name: controllerName("runner"), RegistrationRecheckInterval: time.Millisecond * 100, RegistrationRecheckJitter: time.Millisecond * 10, UnregistrationRetryDelay: 1 * time.Second, + RunnerPodDefaults: RunnerPodDefaults{ + RunnerImage: "example/runner:test", + DockerImage: "example/docker:test", + }, } err = runnerController.SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller") diff --git a/controllers/actions.summerwind.net/new_runner_pod_test.go b/controllers/actions.summerwind.net/new_runner_pod_test.go index fb9b6653a8..dfd0b2faf5 100644 --- a/controllers/actions.summerwind.net/new_runner_pod_test.go +++ b/controllers/actions.summerwind.net/new_runner_pod_test.go @@ -15,6 +15,21 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +func newRunnerPod(template corev1.Pod, runnerSpec arcv1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) { + return newRunnerPodWithContainerMode("", template, runnerSpec, githubBaseURL, d) +} + +func setEnv(c *corev1.Container, name, value string) { + for j := range c.Env { + e := &c.Env[j] + + if e.Name == name { + e.Value = value + return + } + } +} + func 
newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume { GBs, err := resource.ParseQuantity(storageReq) if err != nil { @@ -171,7 +186,7 @@ func TestNewRunnerPod(t *testing.T) { Env: []corev1.EnvVar{ { Name: "DOCKER_GROUP_GID", - Value: "121", + Value: "1234", }, }, VolumeMounts: []corev1.VolumeMount{ @@ -397,6 +412,50 @@ func TestNewRunnerPod(t *testing.T) { config: arcv1alpha1.RunnerConfig{}, want: newTestPod(base, nil), }, + { + description: "it should respect DOCKER_GROUP_GID of the dockerd sidecar container", + template: corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "docker", + Env: []corev1.EnvVar{ + { + Name: "DOCKER_GROUP_GID", + Value: "2345", + }, + }, + }, + }, + }, + }, + config: arcv1alpha1.RunnerConfig{}, + want: newTestPod(base, func(p *corev1.Pod) { + setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "2345") + }), + }, + { + description: "it should add DOCKER_GROUP_GID=1001 to the dockerd sidecar container for Ubuntu 20.04 runners", + template: corev1.Pod{}, + config: arcv1alpha1.RunnerConfig{ + Image: "ghcr.io/summerwind/actions-runner:ubuntu-20.04-20210726-1", + }, + want: newTestPod(base, func(p *corev1.Pod) { + setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "1001") + p.Spec.Containers[0].Image = "ghcr.io/summerwind/actions-runner:ubuntu-20.04-20210726-1" + }), + }, + { + description: "it should add DOCKER_GROUP_GID=121 to the dockerd sidecar container for Ubuntu 22.04 runners", + template: corev1.Pod{}, + config: arcv1alpha1.RunnerConfig{ + Image: "ghcr.io/summerwind/actions-runner:ubuntu-22.04-20210726-1", + }, + want: newTestPod(base, func(p *corev1.Pod) { + setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "121") + p.Spec.Containers[0].Image = "ghcr.io/summerwind/actions-runner:ubuntu-22.04-20210726-1" + }), + }, { description: "dockerdWithinRunnerContainer=true should set privileged=true and omit the dind sidecar container", template: corev1.Pod{}, @@ -552,7 +611,14 @@ func 
TestNewRunnerPod(t *testing.T) { for i := range testcases { tc := testcases[i] t.Run(tc.description, func(t *testing.T) { - got, err := newRunnerPod(tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, false) + got, err := newRunnerPod(tc.template, tc.config, githubBaseURL, RunnerPodDefaults{ + RunnerImage: defaultRunnerImage, + RunnerImagePullSecrets: defaultRunnerImagePullSecrets, + DockerImage: defaultDockerImage, + DockerRegistryMirror: defaultDockerRegistryMirror, + DockerGID: "1234", + UseRunnerStatusUpdateHook: false, + }) require.NoError(t, err) require.Equal(t, tc.want, got) }) @@ -713,7 +779,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { Env: []corev1.EnvVar{ { Name: "DOCKER_GROUP_GID", - Value: "121", + Value: "1234", }, }, VolumeMounts: []corev1.VolumeMount{ @@ -1171,6 +1237,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { defaultRunnerImage = "default-runner-image" defaultRunnerImagePullSecrets = []string{} defaultDockerImage = "default-docker-image" + defaultDockerGID = "1234" defaultDockerRegistryMirror = "" githubBaseURL = "api.github.com" ) @@ -1190,12 +1257,15 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { t.Run(tc.description, func(t *testing.T) { r := &RunnerReconciler{ - RunnerImage: defaultRunnerImage, - RunnerImagePullSecrets: defaultRunnerImagePullSecrets, - DockerImage: defaultDockerImage, - DockerRegistryMirror: defaultDockerRegistryMirror, - GitHubClient: multiClient, - Scheme: scheme, + GitHubClient: multiClient, + Scheme: scheme, + RunnerPodDefaults: RunnerPodDefaults{ + RunnerImage: defaultRunnerImage, + RunnerImagePullSecrets: defaultRunnerImagePullSecrets, + DockerImage: defaultDockerImage, + DockerRegistryMirror: defaultDockerRegistryMirror, + DockerGID: defaultDockerGID, + }, } got, err := r.newPod(tc.runner) require.NoError(t, err) diff --git a/controllers/actions.summerwind.net/runner_controller.go 
b/controllers/actions.summerwind.net/runner_controller.go index 4fa00968fd..c0632ad5d1 100644 --- a/controllers/actions.summerwind.net/runner_controller.go +++ b/controllers/actions.summerwind.net/runner_controller.go @@ -68,15 +68,24 @@ type RunnerReconciler struct { Recorder record.EventRecorder Scheme *runtime.Scheme GitHubClient *MultiGitHubClient - RunnerImage string - RunnerImagePullSecrets []string - DockerImage string - DockerRegistryMirror string Name string RegistrationRecheckInterval time.Duration RegistrationRecheckJitter time.Duration - UseRunnerStatusUpdateHook bool UnregistrationRetryDelay time.Duration + + RunnerPodDefaults RunnerPodDefaults +} + +type RunnerPodDefaults struct { + RunnerImage string + RunnerImagePullSecrets []string + DockerImage string + DockerRegistryMirror string + // The default Docker group ID to use for the dockerd sidecar container. + // Ubuntu 20.04 runner images assumes 1001 and the 22.04 variant assumes 121 by default. + DockerGID string + + UseRunnerStatusUpdateHook bool } // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete @@ -145,7 +154,7 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr ready := runnerPodReady(&pod) - if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.UseRunnerStatusUpdateHook { + if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.RunnerPodDefaults.UseRunnerStatusUpdateHook { if pod.Status.Phase == corev1.PodRunning { // Seeing this message, you can expect the runner to become `Running` soon. 
log.V(1).Info( @@ -292,7 +301,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a return ctrl.Result{}, err } - needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes") + needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes") if needsServiceAccount { serviceAccount := &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -306,7 +315,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a rules := []rbacv1.PolicyRule{} - if r.UseRunnerStatusUpdateHook { + if r.RunnerPodDefaults.UseRunnerStatusUpdateHook { rules = append(rules, []rbacv1.PolicyRule{ { APIGroups: []string{"actions.summerwind.dev"}, @@ -583,7 +592,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) { } } - pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, ghc.GithubBaseURL, r.UseRunnerStatusUpdateHook) + pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, ghc.GithubBaseURL, r.RunnerPodDefaults) if err != nil { return pod, err } @@ -634,7 +643,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) { if runnerSpec.ServiceAccountName != "" { pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName - } else if r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" { + } else if r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" { pod.Spec.ServiceAccountName = runner.ObjectMeta.Name } @@ -754,13 +763,19 @@ func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) { }, nil } -func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, 
runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHook bool) (corev1.Pod, error) { +func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) { var ( privileged bool = true dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer dockerEnabled bool = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled ephemeral bool = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral dockerdInRunnerPrivileged bool = dockerdInRunner + + defaultRunnerImage = d.RunnerImage + defaultRunnerImagePullSecrets = d.RunnerImagePullSecrets + defaultDockerImage = d.DockerImage + defaultDockerRegistryMirror = d.DockerRegistryMirror + useRunnerStatusUpdateHook = d.UseRunnerStatusUpdateHook ) if containerMode == "kubernetes" { @@ -1013,10 +1028,22 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru // for actions-runner-controller) so typically should not need to be // overridden if ok, _ := envVarPresent("DOCKER_GROUP_GID", dockerdContainer.Env); !ok { + gid := d.DockerGID + // We default to gid 121 for Ubuntu 22.04 images + // See below for more details + // - https://github.com/actions/actions-runner-controller/issues/2490#issuecomment-1501561923 + // - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-20.04.dockerfile#L14 + // - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-22.04.dockerfile#L12 + if strings.Contains(runnerContainer.Image, "22.04") { + gid = "121" + } else if strings.Contains(runnerContainer.Image, "20.04") { + gid = "1001" + } + dockerdContainer.Env = 
append(dockerdContainer.Env, corev1.EnvVar{ Name: "DOCKER_GROUP_GID", - Value: "121", + Value: gid, }) } dockerdContainer.Args = append(dockerdContainer.Args, "--group=$(DOCKER_GROUP_GID)") @@ -1240,10 +1267,6 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru return *pod, nil } -func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHookEphemeralRole bool) (corev1.Pod, error) { - return newRunnerPodWithContainerMode("", template, runnerSpec, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, useRunnerStatusUpdateHookEphemeralRole) -} - func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error { name := "runner-controller" if r.Name != "" { diff --git a/controllers/actions.summerwind.net/runnerset_controller.go b/controllers/actions.summerwind.net/runnerset_controller.go index f937237ff8..5fd825a218 100644 --- a/controllers/actions.summerwind.net/runnerset_controller.go +++ b/controllers/actions.summerwind.net/runnerset_controller.go @@ -45,13 +45,10 @@ type RunnerSetReconciler struct { Recorder record.EventRecorder Scheme *runtime.Scheme - CommonRunnerLabels []string - GitHubClient *MultiGitHubClient - RunnerImage string - RunnerImagePullSecrets []string - DockerImage string - DockerRegistryMirror string - UseRunnerStatusUpdateHook bool + CommonRunnerLabels []string + GitHubClient *MultiGitHubClient + + RunnerPodDefaults RunnerPodDefaults } // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete @@ -231,7 +228,7 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a githubBaseURL := ghc.GithubBaseURL - pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, 
template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, githubBaseURL, r.UseRunnerStatusUpdateHook) + pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, githubBaseURL, r.RunnerPodDefaults) if err != nil { return nil, err } diff --git a/main.go b/main.go index aee0322572..543db603ab 100644 --- a/main.go +++ b/main.go @@ -45,6 +45,7 @@ import ( const ( defaultRunnerImage = "summerwind/actions-runner:latest" defaultDockerImage = "docker:dind" + defaultDockerGID = "1001" ) var scheme = runtime.NewScheme() @@ -76,18 +77,15 @@ func main() { autoScalingRunnerSetOnly bool enableLeaderElection bool disableAdmissionWebhook bool - runnerStatusUpdateHook bool leaderElectionId string port int syncPeriod time.Duration defaultScaleDownDelay time.Duration - runnerImage string runnerImagePullSecrets stringSlice + runnerPodDefaults actionssummerwindnet.RunnerPodDefaults - dockerImage string - dockerRegistryMirror string namespace string logLevel string logFormat string @@ -108,10 +106,11 @@ func main() { flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. 
Enabling this will ensure there is only one active controller manager.") flag.StringVar(&leaderElectionId, "leader-election-id", "actions-runner-controller", "Controller id for leader election.") - flag.StringVar(&runnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.") - flag.StringVar(&dockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.") + flag.StringVar(&runnerPodDefaults.RunnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.") + flag.StringVar(&runnerPodDefaults.DockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.") + flag.StringVar(&runnerPodDefaults.DockerGID, "docker-gid", defaultDockerGID, "The default GID of docker group in the docker sidecar container. 
Use 1001 for dockerd sidecars of Ubuntu 20.04 runners 121 for Ubuntu 22.04.") flag.Var(&runnerImagePullSecrets, "runner-image-pull-secret", "The default image-pull secret name for self-hosted runner container.") - flag.StringVar(&dockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.") + flag.StringVar(&runnerPodDefaults.DockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.") flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.") flag.StringVar(&c.EnterpriseURL, "github-enterprise-url", c.EnterpriseURL, "Enterprise URL to be used for your GitHub API calls") flag.Int64Var(&c.AppID, "github-app-id", c.AppID, "The application ID of GitHub App.") @@ -122,7 +121,7 @@ func main() { flag.StringVar(&c.BasicauthUsername, "github-basicauth-username", c.BasicauthUsername, "Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API") flag.StringVar(&c.BasicauthPassword, "github-basicauth-password", c.BasicauthPassword, "Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API") flag.StringVar(&c.RunnerGitHubURL, "runner-github-url", c.RunnerGitHubURL, "GitHub URL to be used by runners during registration") - flag.BoolVar(&runnerStatusUpdateHook, "runner-status-update-hook", false, "Use custom RBAC for runners (role, role binding and service account).") + flag.BoolVar(&runnerPodDefaults.UseRunnerStatusUpdateHook, "runner-status-update-hook", false, "Use custom RBAC for runners (role, role binding and service account).") flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", actionssummerwindnet.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... 
loop)") flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind") flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.") @@ -135,6 +134,8 @@ func main() { flag.Var(&autoScalerImagePullSecrets, "auto-scaler-image-pull-secrets", "The default image-pull secret name for auto-scaler listener container.") flag.Parse() + runnerPodDefaults.RunnerImagePullSecrets = runnerImagePullSecrets + log, err := logging.NewLogger(logLevel, logFormat) if err != nil { fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err) @@ -255,16 +256,11 @@ func main() { ) runnerReconciler := &actionssummerwindnet.RunnerReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("runner"), - Scheme: mgr.GetScheme(), - GitHubClient: multiClient, - DockerImage: dockerImage, - DockerRegistryMirror: dockerRegistryMirror, - UseRunnerStatusUpdateHook: runnerStatusUpdateHook, - // Defaults for self-hosted runner containers - RunnerImage: runnerImage, - RunnerImagePullSecrets: runnerImagePullSecrets, + Client: mgr.GetClient(), + Log: log.WithName("runner"), + Scheme: mgr.GetScheme(), + GitHubClient: multiClient, + RunnerPodDefaults: runnerPodDefaults, } if err = runnerReconciler.SetupWithManager(mgr); err != nil { @@ -296,17 +292,12 @@ func main() { } runnerSetReconciler := &actionssummerwindnet.RunnerSetReconciler{ - Client: mgr.GetClient(), - Log: log.WithName("runnerset"), - Scheme: mgr.GetScheme(), - CommonRunnerLabels: commonRunnerLabels, - DockerImage: dockerImage, - DockerRegistryMirror: dockerRegistryMirror, - GitHubClient: multiClient, - // Defaults for self-hosted runner containers - RunnerImage: runnerImage, - RunnerImagePullSecrets: runnerImagePullSecrets, - UseRunnerStatusUpdateHook: runnerStatusUpdateHook, + Client: mgr.GetClient(), + Log: log.WithName("runnerset"), + Scheme: mgr.GetScheme(), + CommonRunnerLabels: commonRunnerLabels, + 
GitHubClient: multiClient, + RunnerPodDefaults: runnerPodDefaults, } if err = runnerSetReconciler.SetupWithManager(mgr); err != nil { @@ -319,8 +310,9 @@ func main() { "version", build.Version, "default-scale-down-delay", defaultScaleDownDelay, "sync-period", syncPeriod, - "default-runner-image", runnerImage, - "default-docker-image", dockerImage, + "default-runner-image", runnerPodDefaults.RunnerImage, + "default-docker-image", runnerPodDefaults.DockerImage, + "default-docker-gid", runnerPodDefaults.DockerGID, "common-runnner-labels", commonRunnerLabels, "leader-election-enabled", enableLeaderElection, "leader-election-id", leaderElectionId, From 7a3a94eb1ff2009c516e6e976cd3049479e3cf7f Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Mon, 17 Apr 2023 22:00:57 +0900 Subject: [PATCH 184/561] Bump chart version to v0.23.2 for ARC v0.27.3 (#2514) Ref #2490 --- charts/actions-runner-controller/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/actions-runner-controller/Chart.yaml b/charts/actions-runner-controller/Chart.yaml index 16906dc286..a490cd63f4 100644 --- a/charts/actions-runner-controller/Chart.yaml +++ b/charts/actions-runner-controller/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.23.1 +version: 0.23.2 # Used as the default manager tag value when no tag property is provided in the values.yaml -appVersion: 0.27.2 +appVersion: 0.27.3 home: https://github.com/actions/actions-runner-controller From c585e7c42f687e5d18c76abab922baffc97cacdc Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 17 Apr 2023 20:09:56 +0200 Subject: [PATCH 185/561] Fix the path of the index.yaml in job summary (#2515) --- .github/workflows/publish-chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish-chart.yaml b/.github/workflows/publish-chart.yaml index bdc796d69e..7e98a765b4 100644 --- a/.github/workflows/publish-chart.yaml +++ b/.github/workflows/publish-chart.yaml @@ -204,4 +204,4 @@ jobs: echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Status:**" >> $GITHUB_STEP_SUMMARY - echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY + echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY From 3687320a0d5e7ebfdcb4166da94cce8b504ad817 Mon Sep 17 00:00:00 2001 From: cavila-evoliq <107415653+cavila-evoliq@users.noreply.github.com> Date: Mon, 17 Apr 2023 18:26:14 -0500 Subject: [PATCH 186/561] Update ubuntu-22.04 Dockerfile to add python user script dir (#2508) --- runner/actions-runner.ubuntu-22.04.dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/runner/actions-runner.ubuntu-22.04.dockerfile b/runner/actions-runner.ubuntu-22.04.dockerfile index 28a61eb8c7..b0e5fcea2e 100644 --- a/runner/actions-runner.ubuntu-22.04.dockerfile +++ b/runner/actions-runner.ubuntu-22.04.dockerfile @@ -98,6 +98,8 @@ COPY 
docker-shim.sh /usr/local/bin/docker # Configure hooks folder structure. COPY hooks /etc/arc/hooks/ +# Add the Python "User Script Directory" to the PATH +ENV PATH="${PATH}:${HOME}/.local/bin/" ENV ImageOS=ubuntu22 RUN echo "PATH=${PATH}" > /etc/environment \ From eef990fd8291450e7e5fadcca4e40fb658cd91f3 Mon Sep 17 00:00:00 2001 From: Sam Greening <2552620+SG60@users.noreply.github.com> Date: Fri, 21 Apr 2023 02:59:34 +0100 Subject: [PATCH 187/561] Revert actions-runner-controller image tag in kustomization to latest (#2522) --- config/manager/kustomization.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 6a60f8b38a..fea1ed0b47 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -5,7 +5,7 @@ kind: Kustomization images: - name: controller newName: summerwind/actions-runner-controller - newTag: dev + newTag: latest replacements: - path: env-replacement.yaml From e258497170eefad56a3c79d21e864799e8c1d09f Mon Sep 17 00:00:00 2001 From: Edgar Kalinovski <36729677+EdgeSan@users.noreply.github.com> Date: Fri, 21 Apr 2023 05:10:55 +0300 Subject: [PATCH 188/561] Add description for "dockerRegistryMirror" key (#2488) --- charts/actions-runner-controller/values.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/charts/actions-runner-controller/values.yaml b/charts/actions-runner-controller/values.yaml index b5ea7f077c..b1def68e9f 100644 --- a/charts/actions-runner-controller/values.yaml +++ b/charts/actions-runner-controller/values.yaml @@ -47,6 +47,7 @@ authSecret: #github_basicauth_username: "" #github_basicauth_password: "" +# http(s) should be specified for dockerRegistryMirror, e.g.: dockerRegistryMirror="https://" dockerRegistryMirror: "" image: repository: "summerwind/actions-runner-controller" From 502de418769bd8b308c60591353b55560c7cd209 Mon Sep 17 00:00:00 2001 From: Thomas <9749173+uhthomas@users.noreply.github.com> Date: Fri, 
21 Apr 2023 03:15:53 +0100 Subject: [PATCH 189/561] docs: Fix typo for automatic runner scaling (#2375) --- docs/automatically-scaling-runners.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/automatically-scaling-runners.md b/docs/automatically-scaling-runners.md index 05c0214fda..5af157d71c 100644 --- a/docs/automatically-scaling-runners.md +++ b/docs/automatically-scaling-runners.md @@ -17,7 +17,7 @@ This anti-flap configuration also has the final say on if a runner can be scaled This delay is configurable via 2 methods: 1. By setting a new default via the controller's `--default-scale-down-delay` flag -2. By setting by setting the attribute `scaleDownDelaySecondsAfterScaleOut:` in a `HorizontalRunnerAutoscaler` kind's `spec:`. +2. By setting the attribute `scaleDownDelaySecondsAfterScaleOut:` in a `HorizontalRunnerAutoscaler` kind's `spec:`. Below is a complete basic example of one of the pull driven scaling metrics. From bbf917e44f5c9f6cb1fb63133346ef279ca5b4f7 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Mon, 24 Apr 2023 10:40:15 +0200 Subject: [PATCH 190/561] Use build.Version to check if resource version is a mismatch (#2521) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- .github/workflows/e2e-test-linux-vm.yaml | 2 +- .../autoscalingrunnerset_controller.go | 17 ++ .../autoscalingrunnerset_controller_test.go | 173 +++++++++++++++++- 3 files changed, 184 insertions(+), 8 deletions(-) diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/e2e-test-linux-vm.yaml index 6dd39fb4c4..35dafedd41 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/e2e-test-linux-vm.yaml @@ -16,7 +16,7 @@ env: TARGET_ORG: actions-runner-controller TARGET_REPO: arc_e2e_test_dummy IMAGE_NAME: "arc-test-image" - IMAGE_VERSION: "dev" + IMAGE_VERSION: "0.4.0" jobs: default-setup: diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go 
b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 16c20442b8..79e28df90a 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -24,6 +24,7 @@ import ( "strings" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/build" "github.com/actions/actions-runner-controller/github/actions" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -136,6 +137,22 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, nil } + if autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion] != build.Version { + if err := r.Delete(ctx, autoscalingRunnerSet); err != nil { + log.Error(err, "Failed to delete autoscaling runner set on version mismatch", + "targetVersion", build.Version, + "actualVersion", autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], + ) + return ctrl.Result{}, nil + } + + log.Info("Autoscaling runner set version doesn't match the build version. 
Deleting the resource.", + "targetVersion", build.Version, + "actualVersion", autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], + ) + return ctrl.Result{}, nil + } + if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) { log.Info("Adding finalizer") if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index a53732e392..390511e634 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/build" "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions/fake" "github.com/actions/actions-runner-controller/github/actions/testserver" @@ -38,13 +39,25 @@ const ( autoscalingRunnerSetTestGitHubToken = "gh_token" ) -var _ = Describe("Test AutoScalingRunnerSet controller", func() { +var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() { var ctx context.Context var mgr ctrl.Manager var autoscalingNS *corev1.Namespace var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet var configSecret *corev1.Secret + var originalBuildVersion string + buildVersion := "0.1.0" + + BeforeAll(func() { + originalBuildVersion = build.Version + build.Version = buildVersion + }) + + AfterAll(func() { + build.Version = originalBuildVersion + }) + BeforeEach(func() { ctx = context.Background() autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) @@ -67,6 +80,9 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", 
Namespace: autoscalingNS.Name, + Labels: map[string]string{ + LabelKeyKubernetesVersion: buildVersion, + }, }, Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", @@ -474,7 +490,19 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() { }) }) -var _ = Describe("Test AutoScalingController updates", func() { +var _ = Describe("Test AutoScalingController updates", Ordered, func() { + var originalBuildVersion string + buildVersion := "0.1.0" + + BeforeAll(func() { + originalBuildVersion = build.Version + build.Version = buildVersion + }) + + AfterAll(func() { + build.Version = originalBuildVersion + }) + Context("Creating autoscaling runner set with RunnerScaleSetName set", func() { var ctx context.Context var mgr ctrl.Manager @@ -483,6 +511,7 @@ var _ = Describe("Test AutoScalingController updates", func() { var configSecret *corev1.Secret BeforeEach(func() { + originalBuildVersion = build.Version ctx = context.Background() autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) @@ -528,6 +557,9 @@ var _ = Describe("Test AutoScalingController updates", func() { ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, + Labels: map[string]string{ + LabelKeyKubernetesVersion: buildVersion, + }, }, Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", @@ -598,7 +630,18 @@ var _ = Describe("Test AutoScalingController updates", func() { }) }) -var _ = Describe("Test AutoscalingController creation failures", func() { +var _ = Describe("Test AutoscalingController creation failures", Ordered, func() { + var originalBuildVersion string + buildVersion := "0.1.0" + + BeforeAll(func() { + originalBuildVersion = build.Version + build.Version = buildVersion + }) + + AfterAll(func() { + build.Version = originalBuildVersion + }) Context("When autoscaling runner set creation fails on the 
client", func() { var ctx context.Context var mgr ctrl.Manager @@ -629,6 +672,9 @@ var _ = Describe("Test AutoscalingController creation failures", func() { ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, + Labels: map[string]string{ + LabelKeyKubernetesVersion: buildVersion, + }, }, Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", @@ -707,7 +753,18 @@ var _ = Describe("Test AutoscalingController creation failures", func() { }) }) -var _ = Describe("Test Client optional configuration", func() { +var _ = Describe("Test client optional configuration", Ordered, func() { + var originalBuildVersion string + buildVersion := "0.1.0" + + BeforeAll(func() { + originalBuildVersion = build.Version + build.Version = buildVersion + }) + + AfterAll(func() { + build.Version = originalBuildVersion + }) Context("When specifying a proxy", func() { var ctx context.Context var mgr ctrl.Manager @@ -747,6 +804,9 @@ var _ = Describe("Test Client optional configuration", func() { ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, + Labels: map[string]string{ + LabelKeyKubernetesVersion: buildVersion, + }, }, Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "http://example.com/org/repo", @@ -823,6 +883,9 @@ var _ = Describe("Test Client optional configuration", func() { ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, + Labels: map[string]string{ + LabelKeyKubernetesVersion: buildVersion, + }, }, Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "http://example.com/org/repo", @@ -939,6 +1002,9 @@ var _ = Describe("Test Client optional configuration", func() { ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, + Labels: map[string]string{ + LabelKeyKubernetesVersion: buildVersion, + }, }, Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: server.ConfigURLForOrg("my-org"), @@ -989,6 +1055,9 @@ var _ = 
Describe("Test Client optional configuration", func() { ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, + Labels: map[string]string{ + LabelKeyKubernetesVersion: buildVersion, + }, }, Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", @@ -1050,6 +1119,9 @@ var _ = Describe("Test Client optional configuration", func() { ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, + Labels: map[string]string{ + LabelKeyKubernetesVersion: buildVersion, + }, }, Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", @@ -1102,7 +1174,19 @@ var _ = Describe("Test Client optional configuration", func() { }) }) -var _ = Describe("Test external permissions cleanup", func() { +var _ = Describe("Test external permissions cleanup", Ordered, func() { + var originalBuildVersion string + buildVersion := "0.1.0" + + BeforeAll(func() { + originalBuildVersion = build.Version + build.Version = buildVersion + }) + + AfterAll(func() { + build.Version = originalBuildVersion + }) + It("Should clean up kubernetes mode permissions", func() { ctx := context.Background() autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient) @@ -1129,7 +1213,8 @@ var _ = Describe("Test external permissions cleanup", func() { Name: "test-asrs", Namespace: autoscalingNS.Name, Labels: map[string]string{ - "app.kubernetes.io/name": "gha-runner-scale-set", + "app.kubernetes.io/name": "gha-runner-scale-set", + LabelKeyKubernetesVersion: buildVersion, }, Annotations: map[string]string{ AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding", @@ -1286,7 +1371,8 @@ var _ = Describe("Test external permissions cleanup", func() { Name: "test-asrs", Namespace: autoscalingNS.Name, Labels: map[string]string{ - "app.kubernetes.io/name": "gha-runner-scale-set", + "app.kubernetes.io/name": "gha-runner-scale-set", + LabelKeyKubernetesVersion: buildVersion, }, Annotations: 
map[string]string{ AnnotationKeyManagerRoleName: "manager-role", @@ -1465,3 +1551,76 @@ var _ = Describe("Test external permissions cleanup", func() { ).Should(BeTrue(), "Expected role to be cleaned up") }) }) + +var _ = Describe("Test resource version and build version mismatch", func() { + It("Should delete and recreate the autoscaling runner set to match the build version", func() { + ctx := context.Background() + autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient) + + configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) + + controller := &AutoscalingRunnerSetReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: logf.Log, + ControllerNamespace: autoscalingNS.Name, + DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", + ActionsClient: fake.NewMultiClient(), + } + err := controller.SetupWithManager(mgr) + Expect(err).NotTo(HaveOccurred(), "failed to setup controller") + + originalVersion := build.Version + defer func() { + build.Version = originalVersion + }() + build.Version = "0.2.0" + + min := 1 + max := 10 + autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-asrs", + Namespace: autoscalingNS.Name, + Labels: map[string]string{ + "app.kubernetes.io/name": "gha-runner-scale-set", + "app.kubernetes.io/version": "0.1.0", + }, + Annotations: map[string]string{ + AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding", + AnnotationKeyKubernetesModeRoleName: "kube-mode-role", + AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account", + }, + }, + Spec: v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: "https://github.com/owner/repo", + GitHubConfigSecret: configSecret.Name, + MaxRunners: &max, + MinRunners: &min, + RunnerGroup: "testgroup", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/runner", + }, + }, + }, + }, + }, + } + + // 
create autoscaling runner set before starting a manager + err = k8sClient.Create(ctx, autoscalingRunnerSet) + Expect(err).NotTo(HaveOccurred()) + + startManagers(GinkgoT(), mgr) + + Eventually(func() bool { + ars := new(v1alpha1.AutoscalingRunnerSet) + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Name}, ars) + return errors.IsNotFound(err) + }).Should(BeTrue()) + }) +}) From 094a29bc44ee3fd5a308707b80e4ff73505a1e24 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Thu, 27 Apr 2023 13:06:35 +0900 Subject: [PATCH 191/561] Revert docker.sock path to /var/run/docker.sock (#2536) Starting ARC v0.27.2, we've changed the `docker.sock` path from `/var/run/docker.sock` to `/var/run/docker/docker.sock`. That resulted in breaking some container-based actions due to the hard-coded `docker.sock` path in various places. Even `actions/runner` seem to use `/var/run/docker.sock` for building container-based actions and for service containers? Anyway, this fixes that by moving the sock file back to the previous location. Once this gets merged, users stuck at ARC v0.27.1, previously upgraded to 0.27.2 or 0.27.3 and reverted back to v0.27.1 due to #2519, should be able to upgrade to the upcoming v0.27.4. 
Resolves #2519 Resolves #2538 --- acceptance/deploy.sh | 1 + acceptance/deploy_runners.sh | 4 ++ .../testdata/runnerdeploy.envsubst.yaml | 27 ++++++++++ .../new_runner_pod_test.go | 48 ++++++++--------- .../runner_controller.go | 23 +++++---- test/e2e/e2e_test.go | 51 +++++++++++++++---- testing/workflow.go | 7 +++ 7 files changed, 118 insertions(+), 43 deletions(-) diff --git a/acceptance/deploy.sh b/acceptance/deploy.sh index eaa5518786..da542f3831 100755 --- a/acceptance/deploy.sh +++ b/acceptance/deploy.sh @@ -102,6 +102,7 @@ if [ "${tool}" == "helm" ]; then --set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \ --set actionsMetricsServer.podAnnotations.test-id=${TEST_ID} \ ${flags[@]} --set image.imagePullPolicy=${IMAGE_PULL_POLICY} \ + --set image.dindSidecarRepositoryAndTag=${DIND_SIDECAR_REPOSITORY_AND_TAG} \ -f ${VALUES_FILE} set +v # To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes` diff --git a/acceptance/deploy_runners.sh b/acceptance/deploy_runners.sh index bb8a21d4e8..9a3a87987f 100755 --- a/acceptance/deploy_runners.sh +++ b/acceptance/deploy_runners.sh @@ -6,6 +6,10 @@ OP=${OP:-apply} RUNNER_LABEL=${RUNNER_LABEL:-self-hosted} +# See https://github.com/actions/actions-runner-controller/issues/2123 +kubectl delete secret generic docker-config || : +kubectl create secret generic docker-config --from-file .dockerconfigjson=<(jq -M 'del(.aliases)' $HOME/.docker/config.json) --type=kubernetes.io/dockerconfigjson || : + cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f - if [ -n "${TEST_REPO}" ]; then diff --git a/acceptance/testdata/runnerdeploy.envsubst.yaml b/acceptance/testdata/runnerdeploy.envsubst.yaml index 6521eb216b..f522bc4175 100644 --- a/acceptance/testdata/runnerdeploy.envsubst.yaml +++ b/acceptance/testdata/runnerdeploy.envsubst.yaml @@ -95,6 
+95,24 @@ spec: # that part is created by dockerd. mountPath: /home/runner/.local readOnly: false + # See https://github.com/actions/actions-runner-controller/issues/2123 + # Be sure to omit the "aliases" field from the config.json. + # Otherwise you may encounter nasty errors like: + # $ docker build + # docker: 'buildx' is not a docker command. + # See 'docker --help' + # due to the incompatibility between your host docker config.json and the runner environment. + # That is, your host docker config.json might contain this: + # "aliases": { + # "builder": "buildx" + # } + # And this results in the above error when the runner does not have buildx installed yet. + - name: docker-config + mountPath: /home/runner/.docker/config.json + subPath: config.json + readOnly: true + - name: docker-config-root + mountPath: /home/runner/.docker volumes: - name: rootless-dind-work-dir ephemeral: @@ -105,6 +123,15 @@ spec: resources: requests: storage: 3Gi + - name: docker-config + # Refer to .dockerconfigjson/.docker/config.json + secret: + secretName: docker-config + items: + - key: .dockerconfigjson + path: config.json + - name: docker-config-root + emptyDir: {} # # Non-standard working directory diff --git a/controllers/actions.summerwind.net/new_runner_pod_test.go b/controllers/actions.summerwind.net/new_runner_pod_test.go index dfd0b2faf5..f72a347348 100644 --- a/controllers/actions.summerwind.net/new_runner_pod_test.go +++ b/controllers/actions.summerwind.net/new_runner_pod_test.go @@ -91,7 +91,7 @@ func TestNewRunnerPod(t *testing.T) { }, }, { - Name: "docker-sock", + Name: "var-run", VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{ Medium: corev1.StorageMediumMemory, @@ -155,7 +155,7 @@ func TestNewRunnerPod(t *testing.T) { }, { Name: "DOCKER_HOST", - Value: "unix:///run/docker/docker.sock", + Value: "unix:///run/docker.sock", }, }, VolumeMounts: []corev1.VolumeMount{ @@ -168,8 +168,8 @@ func TestNewRunnerPod(t *testing.T) {
"/runner/_work", }, { - Name: "docker-sock", - MountPath: "/run/docker", + Name: "var-run", + MountPath: "/run", }, }, ImagePullPolicy: corev1.PullAlways, @@ -180,7 +180,7 @@ func TestNewRunnerPod(t *testing.T) { Image: "default-docker-image", Args: []string{ "dockerd", - "--host=unix:///run/docker/docker.sock", + "--host=unix:///run/docker.sock", "--group=$(DOCKER_GROUP_GID)", }, Env: []corev1.EnvVar{ @@ -195,8 +195,8 @@ func TestNewRunnerPod(t *testing.T) { MountPath: "/runner", }, { - Name: "docker-sock", - MountPath: "/run/docker", + Name: "var-run", + MountPath: "/run", }, { Name: "work", @@ -543,7 +543,7 @@ func TestNewRunnerPod(t *testing.T) { }, }, { - Name: "docker-sock", + Name: "var-run", VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{ Medium: corev1.StorageMediumMemory, @@ -562,8 +562,8 @@ func TestNewRunnerPod(t *testing.T) { MountPath: "/runner", }, { - Name: "docker-sock", - MountPath: "/run/docker", + Name: "var-run", + MountPath: "/run", }, } }), @@ -587,7 +587,7 @@ func TestNewRunnerPod(t *testing.T) { }, }, { - Name: "docker-sock", + Name: "var-run", VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{ Medium: corev1.StorageMediumMemory, @@ -676,7 +676,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { }, }, { - Name: "docker-sock", + Name: "var-run", VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{ Medium: corev1.StorageMediumMemory, @@ -740,7 +740,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { }, { Name: "DOCKER_HOST", - Value: "unix:///run/docker/docker.sock", + Value: "unix:///run/docker.sock", }, { Name: "RUNNER_NAME", @@ -761,8 +761,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { MountPath: "/runner/_work", }, { - Name: "docker-sock", - MountPath: "/run/docker", + Name: "var-run", + MountPath: "/run", }, }, ImagePullPolicy: corev1.PullAlways, @@ -773,7 +773,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { 
Image: "default-docker-image", Args: []string{ "dockerd", - "--host=unix:///run/docker/docker.sock", + "--host=unix:///run/docker.sock", "--group=$(DOCKER_GROUP_GID)", }, Env: []corev1.EnvVar{ @@ -788,8 +788,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { MountPath: "/runner", }, { - Name: "docker-sock", - MountPath: "/run/docker", + Name: "var-run", + MountPath: "/run", }, { Name: "work", @@ -1149,8 +1149,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { MountPath: "/runner/_work", }, { - Name: "docker-sock", - MountPath: "/run/docker", + Name: "var-run", + MountPath: "/run", }, }, }, @@ -1170,7 +1170,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { }, }, { - Name: "docker-sock", + Name: "var-run", VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{ Medium: corev1.StorageMediumMemory, @@ -1186,8 +1186,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { MountPath: "/runner/_work", }, { - Name: "docker-sock", - MountPath: "/run/docker", + Name: "var-run", + MountPath: "/run", }, { Name: "runner", @@ -1219,7 +1219,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) { }, }, { - Name: "docker-sock", + Name: "var-run", VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{ Medium: corev1.StorageMediumMemory, diff --git a/controllers/actions.summerwind.net/runner_controller.go b/controllers/actions.summerwind.net/runner_controller.go index c0632ad5d1..574d08aa91 100644 --- a/controllers/actions.summerwind.net/runner_controller.go +++ b/controllers/actions.summerwind.net/runner_controller.go @@ -778,6 +778,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru useRunnerStatusUpdateHook = d.UseRunnerStatusUpdateHook ) + const ( + varRunVolumeName = "var-run" + varRunVolumeMountPath = "/run" + ) + if containerMode == "kubernetes" { dockerdInRunner = false dockerEnabled = false @@ -1020,7 +1025,7 @@ func 
newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru // explicitly invoke `dockerd` to avoid automatic TLS / TCP binding dockerdContainer.Args = append([]string{ "dockerd", - "--host=unix:///run/docker/docker.sock", + "--host=unix:///run/docker.sock", }, dockerdContainer.Args...) // this must match a GID for the user in the runner image @@ -1054,7 +1059,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru runnerContainer.Env = append(runnerContainer.Env, corev1.EnvVar{ Name: "DOCKER_HOST", - Value: "unix:///run/docker/docker.sock", + Value: "unix:///run/docker.sock", }, ) @@ -1071,7 +1076,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ - Name: "docker-sock", + Name: varRunVolumeName, VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{ Medium: corev1.StorageMediumMemory, @@ -1090,11 +1095,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru ) } - if ok, _ := volumeMountPresent("docker-sock", runnerContainer.VolumeMounts); !ok { + if ok, _ := volumeMountPresent(varRunVolumeName, runnerContainer.VolumeMounts); !ok { runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts, corev1.VolumeMount{ - Name: "docker-sock", - MountPath: "/run/docker", + Name: varRunVolumeName, + MountPath: varRunVolumeMountPath, }, ) } @@ -1108,10 +1113,10 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru }, } - if p, _ := volumeMountPresent("docker-sock", dockerdContainer.VolumeMounts); !p { + if p, _ := volumeMountPresent(varRunVolumeName, dockerdContainer.VolumeMounts); !p { dockerVolumeMounts = append(dockerVolumeMounts, corev1.VolumeMount{ - Name: "docker-sock", - MountPath: "/run/docker", + Name: varRunVolumeName, + MountPath: varRunVolumeMountPath, }) } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index ef925ccc09..dd09387c1e 
100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -31,13 +31,8 @@ var ( // https://cert-manager.io/docs/installation/supported-releases/ certManagerVersion = "v1.8.2" - images = []testing.ContainerImage{ - testing.Img("docker", "dind"), - testing.Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"), - testing.Img("quay.io/jetstack/cert-manager-controller", certManagerVersion), - testing.Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion), - testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion), - } + arcStableImageRepo = "summerwind/actions-runner-controller" + arcStableImageTag = "v0.25.2" testResultCMNamePrefix = "test-result-" @@ -105,8 +100,8 @@ func TestE2E(t *testing.T) { }{ { label: "stable", - controller: "summerwind/actions-runner-controller", - controllerVer: "v0.25.2", + controller: arcStableImageRepo, + controllerVer: arcStableImageTag, chart: "actions-runner-controller/actions-runner-controller", // 0.20.2 accidentally added support for runner-status-update which isn't supported by ARC 0.25.2. // With some chart values, the controller end up with crashlooping with `flag provided but not defined: -runner-status-update-hook`. 
@@ -423,6 +418,7 @@ type env struct { admissionWebhooksTimeout string imagePullSecretName string imagePullPolicy string + dindSidecarRepositoryAndTag string watchNamespace string vars vars @@ -436,6 +432,8 @@ type vars struct { runnerDindImageRepo string runnerRootlessDindImageRepo string + dindSidecarImageRepo, dindSidecarImageTag string + prebuildImages []testing.ContainerImage builds []testing.DockerBuild @@ -458,6 +456,10 @@ func buildVars(repo, ubuntuVer string) vars { runnerImage = testing.Img(runnerImageRepo, runnerImageTag) runnerDindImage = testing.Img(runnerDindImageRepo, runnerImageTag) runnerRootlessDindImage = testing.Img(runnerRootlessDindImageRepo, runnerImageTag) + + dindSidecarImageRepo = "docker" + dindSidecarImageTag = "20.10.23-dind" + dindSidecarImage = testing.Img(dindSidecarImageRepo, dindSidecarImageTag) ) var vs vars @@ -467,6 +469,9 @@ func buildVars(repo, ubuntuVer string) vars { vs.runnerRootlessDindImageRepo = runnerRootlessDindImageRepo vs.runnerImageRepo = runnerImageRepo + vs.dindSidecarImageRepo = dindSidecarImageRepo + vs.dindSidecarImageTag = dindSidecarImageTag + // vs.controllerImage, vs.controllerImageTag vs.prebuildImages = []testing.ContainerImage{ @@ -474,6 +479,7 @@ func buildVars(repo, ubuntuVer string) vars { runnerImage, runnerDindImage, runnerRootlessDindImage, + dindSidecarImage, } vs.builds = []testing.DockerBuild{ @@ -558,6 +564,8 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env { e.remoteKubeconfig = testing.Getenv(t, "ARC_E2E_REMOTE_KUBECONFIG", "") e.admissionWebhooksTimeout = testing.Getenv(t, "ARC_E2E_ADMISSION_WEBHOOKS_TIMEOUT", "") e.imagePullSecretName = testing.Getenv(t, "ARC_E2E_IMAGE_PULL_SECRET_NAME", "") + // This should be the default for Ubuntu 20.04 based runner images + e.dindSidecarRepositoryAndTag = vars.dindSidecarImageRepo + ":" + vars.dindSidecarImageTag e.vars = vars if e.remoteKubeconfig != "" { @@ -569,6 +577,17 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars 
vars) *env { e.watchNamespace = testing.Getenv(t, "TEST_WATCH_NAMESPACE", "") if e.remoteKubeconfig == "" { + images := []testing.ContainerImage{ + testing.Img(vars.dindSidecarImageRepo, vars.dindSidecarImageTag), + testing.Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"), + testing.Img("quay.io/jetstack/cert-manager-controller", certManagerVersion), + testing.Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion), + testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion), + // Otherwise kubelet would fail to pull images from DockerHub due too rate limit: + // Warning Failed 19s kubelet Failed to pull image "summerwind/actions-runner-controller:v0.25.2": rpc error: code = Unknown desc = failed to pull and unpack image "docker.io/summerwind/actions-runner-controller:v0.25.2": failed to copy: httpReadSeeker: failed open: unexpected status code https://registry-1.docker.io/v2/summerwind/actions-runner-controller/manifests/sha256:92faf7e9f7f09a6240cdb5eb82eaf448852bdddf2fb77d0a5669fd8e5062b97b: 429 Too Many Requests - Server message: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit + testing.Img(arcStableImageRepo, arcStableImageTag), + } + e.Kind = testing.StartKind(t, k8sMinorVer, testing.Preload(images...)) e.Env.Kubeconfig = e.Kind.Kubeconfig() } else { @@ -750,6 +769,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, ch "ADMISSION_WEBHOOKS_TIMEOUT=" + e.admissionWebhooksTimeout, "IMAGE_PULL_SECRET=" + e.imagePullSecretName, "IMAGE_PULL_POLICY=" + e.imagePullPolicy, + "DIND_SIDECAR_REPOSITORY_AND_TAG=" + e.dindSidecarRepositoryAndTag, "WATCH_NAMESPACE=" + e.watchNamespace, } @@ -1156,10 +1176,21 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam With: setupBuildXActionWith, }, testing.Step{ - Run: "docker buildx build --platform=linux/amd64 " + + Run: "docker buildx build --platform=linux/amd64 -t test1 --load " + dockerBuildCache + fmt.Sprintf("-f %s .", dockerfile), }, + testing.Step{ + Run: "docker run --rm test1", + }, + testing.Step{ + Uses: "addnab/docker-run-action@v3", + With: &testing.With{ + Image: "test1", + Run: "hello", + Shell: "sh", + }, + }, ) if useSudo { diff --git a/testing/workflow.go b/testing/workflow.go index 107858ef8e..34c85d17d7 100644 --- a/testing/workflow.go +++ b/testing/workflow.go @@ -55,4 +55,11 @@ type With struct { // Needs to be "docker" in rootless mode // https://stackoverflow.com/questions/66142872/how-to-solve-error-with-rootless-docker-in-github-actions-self-hosted-runner-wr Driver string `json:"driver,omitempty"` + + // Image is the image arg passed to docker-run-action + Image string `json:"image,omitempty"` + // Run is the run arg passed to docker-run-action + Run string `json:"run,omitempty"` + // Shell is the shell arg passed to docker-run-action + Shell string `json:"shell,omitempty"` } From c536b7a49c7e9fa076c9e384d5a2c4ab0b181e32 Mon Sep 17 00:00:00 2001 From: Nuru Date: Wed, 26 Apr 2023 21:15:23 -0700 Subject: [PATCH 
192/561] Stricter filtering of check run completion events (#2520) I observed that 100% of canceled jobs in my runner pool were not causing scale down events. This PR fixes that. The problem was caused by #2119. #2119 ignores certain webhook events in order to fix #2118. However, #2119 overdoes it and filters out valid job cancellation events. This PR uses stricter filtering and add visibility for future troubleshooting.
Example cancellation event This is the redacted top portion of a valid cancellation event my runner pool received and ignored. ```json { "action": "completed", "workflow_job": { "id": 12848997134, "run_id": 4738060033, "workflow_name": "slack-notifier", "head_branch": "auto-update/slack-notifier-0.5.1", "run_url": "https://api.github.com/repos/nuru//actions/runs/4738060033", "run_attempt": 1, "node_id": "CR_kwDOB8Xtbc8AAAAC_dwjDg", "head_sha": "55bada8f3d0d3e12a510a1bf34d0c3e169b65f89", "url": "https://api.github.com/repos/nuru//actions/jobs/12848997134", "html_url": "https://github.com/nuru//actions/runs/4738060033/jobs/8411515430", "status": "completed", "conclusion": "cancelled", "created_at": "2023-04-19T00:03:12Z", "started_at": "2023-04-19T00:03:42Z", "completed_at": "2023-04-19T00:03:42Z", "name": "build (arm64)", "steps": [ ], "check_run_url": "https://api.github.com/repos/nuru//check-runs/12848997134", "labels": [ "self-hosted", "arm64" ], "runner_id": 0, "runner_name": "", "runner_group_id": 0, "runner_group_name": "" }, ```
--- .../horizontal_runner_autoscaler_webhook.go | 24 +++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go index 59f7046994..a377fb9f66 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go @@ -210,13 +210,23 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons if e.GetAction() == "queued" { target.Amount = 1 break - } else if e.GetAction() == "completed" && e.GetWorkflowJob().GetConclusion() != "skipped" && e.GetWorkflowJob().GetRunnerID() > 0 { - // A negative amount is processed in the tryScale func as a scale-down request, - // that erases the oldest CapacityReservation with the same amount. - // If the first CapacityReservation was with Replicas=1, this negative scale target erases that, - // so that the resulting desired replicas decreases by 1. - target.Amount = -1 - break + } else if e.GetAction() == "completed" && e.GetWorkflowJob().GetConclusion() != "skipped" { + // We want to filter out "completed" events sent by check runs. + // See https://github.com/actions/actions-runner-controller/issues/2118 + // and https://github.com/actions/actions-runner-controller/pull/2119 + // But canceled events have runner_id == 0 and GetRunnerID() returns 0 when RunnerID == nil, + // so we need to be more specific in filtering out the check runs. 
+ // See example check run completion at https://gist.github.com/nathanklick/268fea6496a4d7b14cecb2999747ef84 + if e.GetWorkflowJob().GetConclusion() == "success" && e.GetWorkflowJob().RunnerID == nil { + log.V(1).Info("Ignoring workflow_job event because it does not relate to a self-hosted runner") + } else { + // A negative amount is processed in the tryScale func as a scale-down request, + // that erases the oldest CapacityReservation with the same amount. + // If the first CapacityReservation was with Replicas=1, this negative scale target erases that, + // so that the resulting desired replicas decreases by 1. + target.Amount = -1 + break + } } // If the conclusion is "skipped", we will ignore it and fallthrough to the default case. fallthrough From a9e0570be9ac2fab90b3b0eec33f5e9a9ae5b01c Mon Sep 17 00:00:00 2001 From: Paul Brousseau Date: Wed, 26 Apr 2023 21:15:49 -0700 Subject: [PATCH 193/561] docs: minor correction for actions metrics server secret (#2542) Aligning docs with what the Helm chart produces --- charts/actions-runner-controller/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/actions-runner-controller/README.md b/charts/actions-runner-controller/README.md index 465dd96de2..7ccf6094d5 100644 --- a/charts/actions-runner-controller/README.md +++ b/charts/actions-runner-controller/README.md @@ -115,9 +115,9 @@ All additional docs are kept in the `docs/` folder, this README is solely for do | `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | | | `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. 
Valid options are "text" and "json" | text | | `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false | -| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false | +| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the actions-metrics-server | false | | `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false | -| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server | +| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | actions-metrics-server | | `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | | | `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | | | `actionsMetricsServer.nameOverride` | Override the resource name prefix | | From 52ad57a21feb5e5b99b6460241802fe7152c0062 Mon Sep 17 00:00:00 2001 From: Thilo Uttendorfer Date: Thu, 27 Apr 2023 06:16:12 +0200 Subject: [PATCH 194/561] Fix the default version of kube-rbac-proxy in the docs (#2535) --- charts/actions-runner-controller/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/actions-runner-controller/README.md b/charts/actions-runner-controller/README.md index 7ccf6094d5..92ef1e473f 100644 --- a/charts/actions-runner-controller/README.md +++ b/charts/actions-runner-controller/README.md @@ -46,7 +46,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do | `metrics.port` | Set port of metrics service | 8443 | | `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true | | `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy | -| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 | +| 
`metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 | | `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | | | `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | | | `fullnameOverride` | Override the full resource names | | @@ -147,5 +147,5 @@ All additional docs are kept in the `docs/` folder, this README is solely for do | `actionsMetrics.port` | Set port of actions metrics service | 8443 | | `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true | | `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy | -| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 | +| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 | | `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | | From 42b33b87f7fb04dfe11c51ae472257b37410ff1f Mon Sep 17 00:00:00 2001 From: argokasper Date: Thu, 27 Apr 2023 07:22:41 +0300 Subject: [PATCH 195/561] Fix GET validation for lowercase http methods (#2497) Some requests send method in lowercase (verified with curl and as a default for AWS ALB health check requests), but Go HTTP library constant MethodGet is in upper. 
--- .../horizontal_runner_autoscaler_webhook.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go index a377fb9f66..09013c7f58 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go @@ -115,7 +115,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons }() // respond ok to GET / e.g. for health check - if r.Method == http.MethodGet { + if strings.ToUpper(r.Method) == http.MethodGet { ok = true fmt.Fprintln(w, "webhook server is running") return From 0bf383a682b378de65cce06b65d3f597a113012f Mon Sep 17 00:00:00 2001 From: Thomas B Date: Thu, 27 Apr 2023 06:33:48 +0200 Subject: [PATCH 196/561] Add CR and CRB to the helm chart (#2504) In response to https://github.com/actions/actions-runner-controller/issues/2212 , the ARC helm chart is missing ClusterRoleBinding and ClusterRole for the ActionsMetricsServer resulting on missing permissions. This also fix the labels of the ActionsMetricsServer Service as it is selected by the ServiceMonitor with those labels. 
Co-authored-by: Yusuke Kuoka --- .../templates/actionsmetrics.role.yaml | 90 +++++++++++++++++++ .../actionsmetrics.role_binding.yaml | 14 +++ .../templates/actionsmetrics.service.yaml | 2 +- 3 files changed, 105 insertions(+), 1 deletion(-) create mode 100644 charts/actions-runner-controller/templates/actionsmetrics.role.yaml create mode 100644 charts/actions-runner-controller/templates/actionsmetrics.role_binding.yaml diff --git a/charts/actions-runner-controller/templates/actionsmetrics.role.yaml b/charts/actions-runner-controller/templates/actionsmetrics.role.yaml new file mode 100644 index 0000000000..829bcf3bf8 --- /dev/null +++ b/charts/actions-runner-controller/templates/actionsmetrics.role.yaml @@ -0,0 +1,90 @@ +{{- if .Values.actionsMetricsServer.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }} +rules: +- apiGroups: + - actions.summerwind.dev + resources: + - horizontalrunnerautoscalers + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.summerwind.dev + resources: + - horizontalrunnerautoscalers/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.summerwind.dev + resources: + - horizontalrunnerautoscalers/status + verbs: + - get + - patch + - update +- apiGroups: + - actions.summerwind.dev + resources: + - runnersets + verbs: + - get + - list + - watch +- apiGroups: + - actions.summerwind.dev + resources: + - runnerdeployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.summerwind.dev + resources: + - runnerdeployments/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - actions.summerwind.dev + resources: + - runnerdeployments/status + verbs: + - get + - patch + - update +- apiGroups: + - 
authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +{{- end }} diff --git a/charts/actions-runner-controller/templates/actionsmetrics.role_binding.yaml b/charts/actions-runner-controller/templates/actionsmetrics.role_binding.yaml new file mode 100644 index 0000000000..0b64ed5f11 --- /dev/null +++ b/charts/actions-runner-controller/templates/actionsmetrics.role_binding.yaml @@ -0,0 +1,14 @@ +{{- if .Values.actionsMetricsServer.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }} +subjects: + - kind: ServiceAccount + name: {{ include "actions-runner-controller-actions-metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/actions-runner-controller/templates/actionsmetrics.service.yaml b/charts/actions-runner-controller/templates/actionsmetrics.service.yaml index cfd2738e1d..2c70f24b01 100644 --- a/charts/actions-runner-controller/templates/actionsmetrics.service.yaml +++ b/charts/actions-runner-controller/templates/actionsmetrics.service.yaml @@ -5,7 +5,7 @@ metadata: name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }} namespace: {{ .Release.Namespace }} labels: - {{- include "actions-runner-controller.labels" . | nindent 4 }} + {{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . 
| nindent 4 }} {{- if .Values.actionsMetricsServer.service.annotations }} annotations: {{ toYaml .Values.actionsMetricsServer.service.annotations | nindent 4 }} From ac052513682848e6d2660b840f2713802c95960d Mon Sep 17 00:00:00 2001 From: Alex Williams Date: Thu, 27 Apr 2023 05:50:31 +0100 Subject: [PATCH 197/561] Update helm chart to support actions metrics graceful termiantion (#2498) # Summary - add lifecycle, terminationGracePeriodSeconds, and loadBalancerSource ranges to metrics server - these were missed when copying from the other webhook server - original PR adding them to the other webhook server is here https://github.com/actions/actions-runner-controller/pull/2305 Co-authored-by: Yusuke Kuoka --- charts/actions-runner-controller/README.md | 20 ++++++++++++------- .../templates/actionsmetrics.deployment.yaml | 8 +++++++- .../templates/actionsmetrics.service.yaml | 6 ++++++ charts/actions-runner-controller/values.yaml | 4 +++- 4 files changed, 29 insertions(+), 9 deletions(-) diff --git a/charts/actions-runner-controller/README.md b/charts/actions-runner-controller/README.md index 92ef1e473f..d291bb6fee 100644 --- a/charts/actions-runner-controller/README.md +++ b/charts/actions-runner-controller/README.md @@ -102,8 +102,11 @@ All additional docs are kept in the `docs/` folder, this README is solely for do | `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | | | `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | | | `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | | +| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. 
| `10` | +| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` | | `githubWebhookServer.service.type` | Set githubWebhookServer service type | | | `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` | +| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` | | `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false | | `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | | | `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` | @@ -135,17 +138,20 @@ All additional docs are kept in the `docs/` folder, this README is solely for do | `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | | | `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | | | `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | | +| `actionsMetricsServer.terminationGracePeriodSeconds` | Set the actionsMetricsServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. 
| `10` | +| `actionsMetricsServer.lifecycle` | Set the actionsMetricsServer pod lifecycle hooks | `{}` | | `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | | | `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` | +| `actionsMetricsServer.service.loadBalancerSourceRanges` | Set actionsMetricsServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` | | `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false | | `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | | | `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` | | `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | | | `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | | -| `actionsMetrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false | -| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | | -| `actionsMetrics.port` | Set port of actions metrics service | 8443 | -| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true | -| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy | -| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 | -| `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | | +| `actionsMetrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false | +| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | | +| `actionsMetrics.port` | Set port of actions 
metrics service | 8443 | +| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true | +| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy | +| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 | +| `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | | diff --git a/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml b/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml index 5eac200262..d7cb67b239 100644 --- a/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml +++ b/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml @@ -50,6 +50,12 @@ spec: {{- end }} command: - "/actions-metrics-server" + {{- if .Values.actionsMetricsServer.lifecycle }} + {{- with .Values.actionsMetricsServer.lifecycle }} + lifecycle: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} env: - name: GITHUB_WEBHOOK_SECRET_TOKEN valueFrom: @@ -142,7 +148,7 @@ spec: securityContext: {{- toYaml .Values.securityContext | nindent 12 }} {{- end }} - terminationGracePeriodSeconds: 10 + terminationGracePeriodSeconds: {{ .Values.actionsMetricsServer.terminationGracePeriodSeconds }} {{- with .Values.actionsMetricsServer.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/charts/actions-runner-controller/templates/actionsmetrics.service.yaml b/charts/actions-runner-controller/templates/actionsmetrics.service.yaml index 2c70f24b01..0cfae32a6d 100644 --- a/charts/actions-runner-controller/templates/actionsmetrics.service.yaml +++ b/charts/actions-runner-controller/templates/actionsmetrics.service.yaml @@ -23,4 +23,10 @@ spec: {{- end }} selector: {{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . 
| nindent 4 }} + {{- if .Values.actionsMetricsServer.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $ip := .Values.actionsMetricsServer.service.loadBalancerSourceRanges }} + - {{ $ip -}} + {{- end }} + {{- end }} {{- end }} diff --git a/charts/actions-runner-controller/values.yaml b/charts/actions-runner-controller/values.yaml index b1def68e9f..9d4dab840e 100644 --- a/charts/actions-runner-controller/values.yaml +++ b/charts/actions-runner-controller/values.yaml @@ -360,6 +360,7 @@ actionsMetricsServer: protocol: TCP name: http #nodePort: someFixedPortForUseWithTerraformCdkCfnEtc + loadBalancerSourceRanges: [] ingress: enabled: false ingressClassName: "" @@ -389,4 +390,5 @@ actionsMetricsServer: # - secretName: chart-example-tls # hosts: # - chart-example.local - + terminationGracePeriodSeconds: 10 + lifecycle: {} From b6dc2e1c73044e35f248d9784d9bd53bd65689d6 Mon Sep 17 00:00:00 2001 From: mspasoje <129410785+mspasoje@users.noreply.github.com> Date: Wed, 26 Apr 2023 21:53:22 -0700 Subject: [PATCH 198/561] Fix for GHES when authorized through GitHub App with GITHUB_URL instead of GITHUB_ENTERPRISE_URL (#2464) Ref #2457 --- github/github.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/github/github.go b/github/github.go index 3d7251e4af..f5d710d330 100644 --- a/github/github.go +++ b/github/github.go @@ -84,6 +84,8 @@ func (c *Config) NewClient() (*Client, error) { return nil, fmt.Errorf("enterprise url incorrect: %v", err) } tr.BaseURL = githubAPIURL + } else if c.URL != "" && tr.BaseURL != c.URL { + tr.BaseURL = c.URL } transport = tr } From a5320abbf7efcdd148da328d6c682d998ac9e0ea Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 28 Apr 2023 10:05:46 -0400 Subject: [PATCH 199/561] Update runner to version 2.304.0 (#2543) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- Makefile | 2 +- runner/Makefile | 2 +- 
runner/VERSION | 2 +- test/e2e/e2e_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 3d3a83716a..69b86aef23 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ else endif DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1) VERSION ?= dev -RUNNER_VERSION ?= 2.303.0 +RUNNER_VERSION ?= 2.304.0 TARGETPLATFORM ?= $(shell arch) RUNNER_NAME ?= ${DOCKER_USER}/actions-runner RUNNER_TAG ?= ${VERSION} diff --git a/runner/Makefile b/runner/Makefile index d7dafebdae..d404ab4843 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) -RUNNER_VERSION ?= 2.303.0 +RUNNER_VERSION ?= 2.304.0 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0 DOCKER_VERSION ?= 20.10.23 diff --git a/runner/VERSION b/runner/VERSION index f67bb997a5..971440973d 100644 --- a/runner/VERSION +++ b/runner/VERSION @@ -1 +1 @@ -2.303.0 +2.304.0 diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index dd09387c1e..91038ee495 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -36,7 +36,7 @@ var ( testResultCMNamePrefix = "test-result-" - RunnerVersion = "2.303.0" + RunnerVersion = "2.304.0" ) // If you're willing to run this test via VS Code "run test" or "debug test", From a60557138104be29e93aaa3a01048eeecb88cb6e Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Wed, 3 May 2023 11:53:42 +0200 Subject: [PATCH 200/561] Check release tag version and chart versions during the release process (#2524) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- .../workflows/publish-runner-scale-set.yaml | 21 ++++++---- .gitignore | 2 + Makefile | 2 +- hack/check-gh-chart-versions.sh | 42 +++++++++++++++++++ hack/make-env.sh | 2 +- 5 files changed, 60 insertions(+), 9 deletions(-) create mode 100755 hack/check-gh-chart-versions.sh diff --git a/.github/workflows/publish-runner-scale-set.yaml 
b/.github/workflows/publish-runner-scale-set.yaml index 0508b3c560..12029e9a82 100644 --- a/.github/workflows/publish-runner-scale-set.yaml +++ b/.github/workflows/publish-runner-scale-set.yaml @@ -36,7 +36,7 @@ permissions: packages: write jobs: - build-push-image: + build-push-image: name: Build and push controller image runs-on: ubuntu-latest steps: @@ -46,7 +46,14 @@ jobs: # If inputs.ref is empty, it'll resolve to the default branch ref: ${{ inputs.ref }} - - name: Resolve parameters + - name: Check chart versions + # Binary version and chart versions need to match. + # In case of an upgrade, the controller will try to clean up + # resources with older versions that should have been cleaned up + # during the upgrade process + run: ./hack/check-gh-chart-versions.sh ${{ inputs.release_tag_name }} + + - name: Resolve parameters id: resolve_parameters run: | resolvedRef="${{ inputs.ref }}" @@ -67,7 +74,7 @@ jobs: uses: docker/setup-buildx-action@v2 with: # Pinning v0.9.1 for Buildx and BuildKit v0.10.6 - # BuildKit v0.11 which has a bug causing intermittent + # BuildKit v0.11 which has a bug causing intermittent # failures pushing images to GHCR version: v0.9.1 driver-opts: image=moby/buildkit:v0.10.6 @@ -115,7 +122,7 @@ jobs: # If inputs.ref is empty, it'll resolve to the default branch ref: ${{ inputs.ref }} - - name: Resolve parameters + - name: Resolve parameters id: resolve_parameters run: | resolvedRef="${{ inputs.ref }}" @@ -126,7 +133,7 @@ jobs: echo "INFO: Resolving short SHA for $resolvedRef" echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT echo "INFO: Normalizing repository name (lowercase)" - echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT + echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT - name: Set up Helm # Using https://github.com/Azure/setup-helm/releases/tag/v3.5 @@ -163,7 +170,7 @@ jobs: # If 
inputs.ref is empty, it'll resolve to the default branch ref: ${{ inputs.ref }} - - name: Resolve parameters + - name: Resolve parameters id: resolve_parameters run: | resolvedRef="${{ inputs.ref }}" @@ -174,7 +181,7 @@ jobs: echo "INFO: Resolving short SHA for $resolvedRef" echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT echo "INFO: Normalizing repository name (lowercase)" - echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT + echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT - name: Set up Helm # Using https://github.com/Azure/setup-helm/releases/tag/v3.5 diff --git a/.gitignore b/.gitignore index 0e4e30b7a8..ce539d20b8 100644 --- a/.gitignore +++ b/.gitignore @@ -35,3 +35,5 @@ bin .DS_STORE /test-assets + +/.tools diff --git a/Makefile b/Makefile index 69b86aef23..6f0cdabe21 100644 --- a/Makefile +++ b/Makefile @@ -202,7 +202,7 @@ generate: controller-gen # Run shellcheck on runner scripts shellcheck: shellcheck-install - $(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh + $(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh docker-buildx: export DOCKER_CLI_EXPERIMENTAL=enabled ;\ diff --git a/hack/check-gh-chart-versions.sh b/hack/check-gh-chart-versions.sh new file mode 100755 index 0000000000..ac5d49a916 --- /dev/null +++ b/hack/check-gh-chart-versions.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# Checks the chart versions against an input version. Fails on mismatch. 
+# +# Usage: +# check-gh-chart-versions.sh + +set -eo pipefail + +TEXT_RED='\033[0;31m' +TEXT_RESET='\033[0m' +TEXT_GREEN='\033[0;32m' + +target_version=$1 +if [[ $# -eq 0 ]]; then + echo "Release version argument is required" + echo + echo "Usage: ${0} " + exit 1 +fi + +chart_dir="$(pwd)/charts" + +controller_version=$(yq .version < "${chart_dir}/gha-runner-scale-set-controller/Chart.yaml") +controller_app_version=$(yq .appVersion < "${chart_dir}/gha-runner-scale-set-controller/Chart.yaml") + +scaleset_version=$(yq .version < "${chart_dir}/gha-runner-scale-set/Chart.yaml") +scaleset_app_version=$(yq .appVersion < "${chart_dir}/gha-runner-scale-set/Chart.yaml") + +if [[ "${controller_version}" != "${target_version}" ]] || + [[ "${controller_app_version}" != "${target_version}" ]] || + [[ "${scaleset_version}" != "${target_version}" ]] || + [[ "${scaleset_app_version}" != "${target_version}" ]]; then + echo -e "${TEXT_RED}Chart versions do not match${TEXT_RESET}" + echo "Target version: ${target_version}" + echo "Controller version: ${controller_version}" + echo "Controller app version: ${controller_app_version}" + echo "Scale set version: ${scaleset_version}" + echo "Scale set app version: ${scaleset_app_version}" + exit 1 +fi + +echo -e "${TEXT_GREEN}Chart versions: ${controller_version}" diff --git a/hack/make-env.sh b/hack/make-env.sh index 2562d0c7ab..343dc26cc4 100755 --- a/hack/make-env.sh +++ b/hack/make-env.sh @@ -2,7 +2,7 @@ COMMIT=$(git rev-parse HEAD) TAG=$(git describe --exact-match --abbrev=0 --tags "${COMMIT}" 2> /dev/null || true) -BRANCH=$(git branch | grep \* | cut -d ' ' -f2 | sed -e 's/[^a-zA-Z0-9+=._:/-]*//g' || true) +BRANCH=$(git branch | grep "\*" | cut -d ' ' -f2 | sed -e 's/[^a-zA-Z0-9+=._:/-]*//g' || true) VERSION="" if [ -z "$TAG" ]; then From 71764e64c5223713f5fbd164d99a6dadfd222511 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Thu, 4 May 2023 21:04:42 +0900 Subject: [PATCH 201/561] Update "People" section in README (#2537) 
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- README.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 338a74057e..c1046e6e52 100644 --- a/README.md +++ b/README.md @@ -6,17 +6,14 @@ ## People -`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions), mostly in their spare time. +`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with the GitHub Actions team, external maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions). -If you think the project is awesome and it's becoming a basis for your important business, consider [sponsoring us](https://github.com/sponsors/actions-runner-controller)! +If you think the project is awesome and is adding value to your business, please consider directly sponsoring [community maintainers](https://github.com/sponsors/actions-runner-controller) and individual contributors via GitHub Sponsors. In case you are already the employer of one of contributors, sponsoring via GitHub Sponsors might not be an option. Just support them in other means! -We don't currently have [any sponsors dedicated to this project yet](https://github.com/sponsors/actions-runner-controller). -However, [HelloFresh](https://www.hellofreshgroup.com/en/) has recently started sponsoring @mumoshu for this project along with his other works. A part of their sponsorship will enable @mumoshu to add an E2E test to keep ARC even more reliable on AWS. Thank you for your sponsorship! 
- -[](https://careers.hellofresh.com/) +See [the sponsorship dashboard](https://github.com/sponsors/actions-runner-controller) for the former and the current sponsors. ## Status From 91b586eb8a95baaaac296bb3173332a75b7dffb8 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 8 May 2023 21:24:32 +0200 Subject: [PATCH 202/561] Add link to walkthrough video on youtube (#2570) --- docs/preview/gha-runner-scale-set-controller/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index 2153414d85..03ffb1ae71 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -16,7 +16,7 @@ In addition to the increased reliability of the automatic scaling, we have worke ### Demo -https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a73e-27f5e8c75720.mp4 +[![Watch the walkthrough](https://img.youtube.com/vi/wQ0k5k6KW5Y/hqdefault.jpg)](https://youtu.be/wQ0k5k6KW5Y) ## Setup From f68fa577379c52116d4f7d9a9c3e07d53286c8d5 Mon Sep 17 00:00:00 2001 From: Seonghyeon Cho Date: Wed, 10 May 2023 03:43:15 +0900 Subject: [PATCH 203/561] docs: Update github docs links under `/managing-self-hosted-runners` (#2554) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- contrib/examples/actions-runner/values.yaml | 2 +- docs/about-arc.md | 4 ++-- docs/choosing-runner-destination.md | 2 +- docs/managing-access-with-runner-groups.md | 2 +- docs/quickstart.md | 4 ++-- docs/using-arc-runners-in-a-workflow.md | 2 +- docs/using-custom-volumes.md | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/contrib/examples/actions-runner/values.yaml b/contrib/examples/actions-runner/values.yaml index 9d88fb6c27..c593d2eb36 100644 --- a/contrib/examples/actions-runner/values.yaml +++ 
b/contrib/examples/actions-runner/values.yaml @@ -17,7 +17,7 @@ runnerLabels: replicaCount: 1 # The Runner Group that the runner(s) should be associated with. -# See https://docs.github.com/en/github-ae@latest/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups. +# See https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/managing-access-to-self-hosted-runners-using-groups. group: Default autoscaler: diff --git a/docs/about-arc.md b/docs/about-arc.md index 01e7b0775c..6e1473e76a 100644 --- a/docs/about-arc.md +++ b/docs/about-arc.md @@ -14,7 +14,7 @@ You can create workflows that build and test every pull request to your reposito Runners execute the job that is assigned to them by Github Actions workflow. There are two types of Runners: - [Github-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners) - GitHub provides Linux, Windows, and macOS virtual machines to run your workflows. These virtual machines are hosted in the cloud by Github. -- [Self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners) - you can host your own self-hosted runners in your own data center or cloud infrastructure. ARC deploys self-hosted runners. +- [Self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners) - you can host your own self-hosted runners in your own data center or cloud infrastructure. ARC deploys self-hosted runners. ## Self hosted runners Self-hosted runners offer more control of hardware, operating system, and software tools than GitHub-hosted runners. With self-hosted runners, you can create custom hardware configurations that meet your needs with processing power or memory to run larger jobs, install software available on your local network, and choose an operating system not offered by GitHub-hosted runners. 
@@ -83,7 +83,7 @@ The GitHub hosted runners include a large amount of pre-installed software packa ARC maintains a few runner images with `latest` aligning with GitHub's Ubuntu version. These images do not contain all of the software installed on the GitHub runners. They contain subset of packages from the GitHub runners: Basic CLI packages, git, docker and build-essentials. To install additional software, it is recommended to use the corresponding setup actions. For instance, `actions/setup-java` for Java or `actions/setup-node` for Node. ## Executing workflows -Now, all the setup and configuration is done. A workflow can be created in the same repository that could target the self hosted runner created from ARC. The workflow needs to have `runs-on: self-hosted` so it can target the self host pool. For more information on targeting workflows to run on self hosted runners, see "[Using Self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow)." +Now, all the setup and configuration is done. A workflow can be created in the same repository that could target the self hosted runner created from ARC. The workflow needs to have `runs-on: self-hosted` so it can target the self host pool. For more information on targeting workflows to run on self hosted runners, see "[Using Self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/using-self-hosted-runners-in-a-workflow)." ## Scaling runners - statically with replicas count With a small tweak to the replicas count (for eg - `replicas: 2`) in the `runnerdeployment.yaml` file, more runners can be created. Depending on the count of replicas, those many sets of pods would be created. As before, Each pod contains the two containers. 
diff --git a/docs/choosing-runner-destination.md b/docs/choosing-runner-destination.md index 3d06d0ce12..c94c1826bc 100644 --- a/docs/choosing-runner-destination.md +++ b/docs/choosing-runner-destination.md @@ -2,7 +2,7 @@ ## Usage -[GitHub self-hosted runners can be deployed at various levels in a management hierarchy](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners#about-self-hosted-runners): +[GitHub self-hosted runners can be deployed at various levels in a management hierarchy](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners#about-self-hosted-runners): - The repository level - The organization level - The enterprise level diff --git a/docs/managing-access-with-runner-groups.md b/docs/managing-access-with-runner-groups.md index 551dfc3999..f2b96f0f93 100644 --- a/docs/managing-access-with-runner-groups.md +++ b/docs/managing-access-with-runner-groups.md @@ -2,7 +2,7 @@ ## Runner Groups -Runner groups can be used to limit which repositories are able to use the GitHub Runner at an organization level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced. +Runner groups can be used to limit which repositories are able to use the GitHub Runner at an organization level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced. To add the runner to the group `NewGroup`, specify the group in your `Runner` or `RunnerDeployment` spec. 
diff --git a/docs/quickstart.md b/docs/quickstart.md index a278512c32..55f8048785 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -132,9 +132,9 @@ NAME READY STATUS RESTARTS AGE example-runnerdeploy2475ht2qbr 2/2 Running 0 1m ```` -Also, this runner has been registered directly to the specified repository, you can see it in repository settings. For more information, see "[Checking the status of a self-hosted runner - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/monitoring-and-troubleshooting-self-hosted-runners#checking-the-status-of-a-self-hosted-runner)." +Also, this runner has been registered directly to the specified repository, you can see it in repository settings. For more information, see "[Checking the status of a self-hosted runner - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/monitoring-and-troubleshooting-self-hosted-runners#checking-the-status-of-a-self-hosted-runner)." -:two: You are ready to execute workflows against this self-hosted runner. For more information, see "[Using self-hosted runners in a workflow - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#using-self-hosted-runners-in-a-workflow)." +:two: You are ready to execute workflows against this self-hosted runner. For more information, see "[Using self-hosted runners in a workflow - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/using-self-hosted-runners-in-a-workflow#using-self-hosted-runners-in-a-workflow)." There is also a quick start guide to get started on Actions, For more information, please refer to "[Quick start Guide to GitHub Actions](https://docs.github.com/en/actions/quickstart)." 
diff --git a/docs/using-arc-runners-in-a-workflow.md b/docs/using-arc-runners-in-a-workflow.md index 6d1fb459c9..cd287e0bd1 100644 --- a/docs/using-arc-runners-in-a-workflow.md +++ b/docs/using-arc-runners-in-a-workflow.md @@ -37,4 +37,4 @@ jobs: When using labels there are a few things to be aware of: 1. `self-hosted` is implict with every runner as this is an automatic label GitHub apply to any self-hosted runner. As a result ARC can treat all runners as having this label without having it explicitly defined in a runner's manifest. You do not need to explicitly define this label in your runner manifests (you can if you want though). -2. In addition to the `self-hosted` label, GitHub also applies a few other [default](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#using-default-labels-to-route-jobs) labels to any self-hosted runner. The other default labels relate to the architecture of the runner and so can't be implicitly applied by ARC as ARC doesn't know if the runner is `linux` or `windows`, `x64` or `ARM64` etc. If you wish to use these labels in your workflows and have ARC scale runners accurately you must also add them to your runner manifests. \ No newline at end of file +2. In addition to the `self-hosted` label, GitHub also applies a few other [default](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/using-self-hosted-runners-in-a-workflow#using-default-labels-to-route-jobs) labels to any self-hosted runner. The other default labels relate to the architecture of the runner and so can't be implicitly applied by ARC as ARC doesn't know if the runner is `linux` or `windows`, `x64` or `ARM64` etc. If you wish to use these labels in your workflows and have ARC scale runners accurately you must also add them to your runner manifests. 
\ No newline at end of file diff --git a/docs/using-custom-volumes.md b/docs/using-custom-volumes.md index bfdf8d77b6..57771c8cef 100644 --- a/docs/using-custom-volumes.md +++ b/docs/using-custom-volumes.md @@ -160,7 +160,7 @@ spec: ### PV-backed runner work directory -ARC works by automatically creating runner pods for running [`actions/runner`](https://github.com/actions/runner) and [running `config.sh`](https://docs.github.com/en/actions/hosting-your-own-runners/adding-self-hosted-runners#adding-a-self-hosted-runner-to-a-repository) which you had to ran manually without ARC. +ARC works by automatically creating runner pods for running [`actions/runner`](https://github.com/actions/runner) and [running `config.sh`](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/adding-self-hosted-runners#adding-a-self-hosted-runner-to-a-repository) which you had to ran manually without ARC. `config.sh` is the script provided by `actions/runner` to pre-configure the runner process before being started. One of the options provided by `config.sh` is `--work`, which specifies the working directory where the runner runs your workflow jobs in. From 09bdfa2c3878ae3698566dff6d10dc8346a702a1 Mon Sep 17 00:00:00 2001 From: "Y. 
Luis" Date: Tue, 9 May 2023 19:45:18 +0100 Subject: [PATCH 204/561] Fixed scaling runners doc link (#2474) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- contrib/examples/actions-runner/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/examples/actions-runner/README.md b/contrib/examples/actions-runner/README.md index 8e24f00f17..1c8a352a2e 100644 --- a/contrib/examples/actions-runner/README.md +++ b/contrib/examples/actions-runner/README.md @@ -31,6 +31,6 @@ All additional docs are kept in the `docs/` folder, this README is solely for do | `autoscaler.enabled` | Enable the HorizontalRunnerAutoscaler, if its enabled then replica count will not be used | true | | `autoscaler.minReplicas` | Minimum no of replicas | 1 | | `autoscaler.maxReplicas` | Maximum no of replicas | 5 | -| `autoscaler.scaleDownDelaySecondsAfterScaleOut` | [Anti-Flapping Configuration](https://github.com/actions/actions-runner-controller#anti-flapping-configuration) | 120 | -| `autoscaler.metrics` | [Pull driven scaling](https://github.com/actions/actions-runner-controller#pull-driven-scaling) | default | -| `autoscaler.scaleUpTriggers` | [Webhook driven scaling](https://github.com/actions/actions-runner-controller#webhook-driven-scaling) | | +| `autoscaler.scaleDownDelaySecondsAfterScaleOut` | [Anti-Flapping Configuration](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#anti-flapping-configuration) | 120 | +| `autoscaler.metrics` | [Pull driven scaling](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#pull-driven-scaling) | default | +| `autoscaler.scaleUpTriggers` | [Webhook driven scaling](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#webhook-driven-scaling) | | From 6e3966e32de299cab126a9b61e96577dc0f32e50 Mon Sep 17 00:00:00 2001 From: kahirokunn Date: Wed, 10 
May 2023 23:39:54 +0900 Subject: [PATCH 205/561] docs: use INSTALLATION_NAME (#2552) Signed-off-by: kahirokunn --- .../gha-runner-scale-set-controller/README.md | 31 +++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index 03ffb1ae71..6c7839cede 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -68,7 +68,7 @@ In addition to the increased reliability of the automatic scaling, we have worke GITHUB_APP_ID="" GITHUB_APP_INSTALLATION_ID="" GITHUB_APP_PRIVATE_KEY="" - helm install arc-runner-set \ + helm install "${INSTALLATION_NAME}" \ --namespace "${NAMESPACE}" \ --create-namespace \ --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ @@ -101,13 +101,12 @@ In addition to the increased reliability of the automatic scaling, we have worke ```yaml name: Test workflow on: - workflow_dispatch: - + workflow_dispatch: jobs: - test: + test: runs-on: arc-runner-set - steps: - - name: Hello world + steps: + - name: Hello world run: echo "Hello world" ``` @@ -209,10 +208,10 @@ To fix this, you can either: ```yaml spec: - securityContext: - fsGroup: 123 - containers: - - name: runner + securityContext: + fsGroup: 123 + containers: + - name: runner image: ghcr.io/actions/actions-runner: # Replace with the version you want to use command: ["/home/runner/run.sh"] ``` @@ -222,15 +221,15 @@ To fix this, you can either: ```yaml template: spec: - initContainers: - - name: kube-init + initContainers: + - name: kube-init image: ghcr.io/actions/actions-runner:latest command: ["sudo", "chown", "-R", "1001:123", "/home/runner/_work"] volumeMounts: - - name: work - mountPath: /home/runner/_work - containers: - - name: runner + - name: work + mountPath: /home/runner/_work + containers: + - name: runner image: ghcr.io/actions/actions-runner:latest command: ["/home/runner/run.sh"] 
``` From 4d828f6692cbb9ac343c31c18a59e59c3bccda17 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Fri, 12 May 2023 11:52:24 +0200 Subject: [PATCH 206/561] Fix update runners scheduled workflow to check for container-hooks upgrades (#2576) --- .github/workflows/release-runners.yaml | 21 +++-- .github/workflows/update-runners.yaml | 82 ++++++++++++++----- runner/Makefile | 2 +- runner/VERSION | 3 +- ...nner-dind-rootless.ubuntu-20.04.dockerfile | 2 +- ...nner-dind-rootless.ubuntu-22.04.dockerfile | 2 +- ...ctions-runner-dind.ubuntu-20.04.dockerfile | 2 +- ...ctions-runner-dind.ubuntu-22.04.dockerfile | 2 +- runner/actions-runner.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner.ubuntu-22.04.dockerfile | 2 +- 10 files changed, 82 insertions(+), 38 deletions(-) diff --git a/.github/workflows/release-runners.yaml b/.github/workflows/release-runners.yaml index 7a2334fada..ca39e185ad 100644 --- a/.github/workflows/release-runners.yaml +++ b/.github/workflows/release-runners.yaml @@ -1,4 +1,4 @@ -name: Runners +name: Release Runner Images # Revert to https://github.com/actions-runner-controller/releases#releases # for details on why we use this approach @@ -18,7 +18,6 @@ env: TARGET_ORG: actions-runner-controller TARGET_WORKFLOW: release-runners.yaml DOCKER_VERSION: 20.10.23 - RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0 jobs: build-runners: @@ -27,10 +26,12 @@ jobs: steps: - uses: actions/checkout@v3 - name: Get runner version - id: runner_version + id: versions run: | - version=$(echo -n $(cat runner/VERSION)) - echo runner_version=$version >> $GITHUB_OUTPUT + runner_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))" + container_hooks_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))" + echo runner_version=$runner_current_version >> $GITHUB_OUTPUT + echo container_hooks_version=$container_hooks_current_version >> $GITHUB_OUTPUT - 
name: Get Token id: get_workflow_token @@ -42,7 +43,8 @@ jobs: - name: Trigger Build And Push Runner Images To Registries env: - RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }} + RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }} + CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }} run: | # Authenticate gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }} @@ -51,20 +53,21 @@ jobs: gh workflow run ${{ env.TARGET_WORKFLOW }} -R ${{ env.TARGET_ORG }}/releases \ -f runner_version=${{ env.RUNNER_VERSION }} \ -f docker_version=${{ env.DOCKER_VERSION }} \ - -f runner_container_hooks_version=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }} \ + -f runner_container_hooks_version=${{ env.CONTAINER_HOOKS_VERSION }} \ -f sha='${{ github.sha }}' \ -f push_to_registries=${{ env.PUSH_TO_REGISTRIES }} - name: Job summary env: - RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }} + RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }} + CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }} run: | echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" 
>> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY echo "- runner_version: ${{ env.RUNNER_VERSION }}" >> $GITHUB_STEP_SUMMARY echo "- docker_version: ${{ env.DOCKER_VERSION }}" >> $GITHUB_STEP_SUMMARY - echo "- runner_container_hooks_version: ${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY + echo "- runner_container_hooks_version: ${{ env.CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY echo "- push_to_registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/update-runners.yaml b/.github/workflows/update-runners.yaml index cf39ee1d99..1fc896ec91 100644 --- a/.github/workflows/update-runners.yaml +++ b/.github/workflows/update-runners.yaml @@ -16,21 +16,34 @@ jobs: env: GH_TOKEN: ${{ github.token }} outputs: - current_version: ${{ steps.versions.outputs.current_version }} - latest_version: ${{ steps.versions.outputs.latest_version }} + runner_current_version: ${{ steps.runner_versions.outputs.runner_current_version }} + runner_latest_version: ${{ steps.runner_versions.outputs.runner_latest_version }} + container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }} + container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }} steps: - uses: actions/checkout@v3 - - name: Get current and latest versions - id: versions + - name: Get runner current and latest versions + id: runner_versions run: | - CURRENT_VERSION=$(echo -n $(cat runner/VERSION)) + CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))" echo "Current version: $CURRENT_VERSION" - echo current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT + echo runner_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases 
--limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1) echo "Latest version: $LATEST_VERSION" - echo latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT + echo runner_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT + + - name: Get container-hooks current and latest versions + id: container_hooks_versions + run: | + CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))" + echo "Current version: $CURRENT_VERSION" + echo container_hooks_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT + + LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner-container-hooks | grep -oP '(?<=v)[0-9.]+' | head -1) + echo "Latest version: $LATEST_VERSION" + echo container_hooks_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT # check_pr checks if a PR for the same update already exists. It only runs if # runner latest version != our current version. If no existing PR is found, @@ -38,7 +51,7 @@ jobs: check_pr: runs-on: ubuntu-latest needs: check_versions - if: needs.check_versions.outputs.current_version != needs.check_versions.outputs.latest_version + if: needs.check_versions.outputs.runner_current_version != needs.check_versions.outputs.runner_latest_version || needs.check_versions.outputs.container_hooks_current_version != needs.check_versions.outputs.container_hooks_latest_version outputs: pr_name: ${{ steps.pr_name.outputs.pr_name }} env: @@ -46,17 +59,36 @@ jobs: steps: - name: debug run: - echo ${{ needs.check_versions.outputs.current_version }} - echo ${{ needs.check_versions.outputs.latest_version }} + echo "RUNNER_CURRENT_VERSION=${{ needs.check_versions.outputs.runner_current_version }}" + echo "RUNNER_LATEST_VERSION=${{ needs.check_versions.outputs.runner_latest_version }}" + echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}" + echo "CONTAINER_HOOKS_LATEST_VERSION=${{ 
needs.check_versions.outputs.container_hooks_latest_version }}" + - uses: actions/checkout@v3 - name: PR Name id: pr_name env: - LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }} + RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }} + RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }} + CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }} + CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }} + # Generate a PR name with the following title: + # Updates: runner to v2.304.0 and container-hooks to v0.3.1 run: | - PR_NAME="Update runner to version ${LATEST_VERSION}" - + RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}" + CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}" + + PR_NAME="Updates:" + if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ] + then + PR_NAME="$PR_NAME $RUNNER_MESSAGE" + fi + if [ "$CONTAINER_HOOKS_CURRENT_VERSION" != "$CONTAINER_HOOKS_LATEST_VERSION" ] + then + PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE" + fi + result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1) if [ -z "$result" ] then @@ -80,21 +112,29 @@ jobs: actions: write env: GH_TOKEN: ${{ github.token }} - CURRENT_VERSION: ${{ needs.check_versions.outputs.current_version }} - LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }} + RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }} + RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }} + CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }} + CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }} PR_NAME: ${{ needs.check_pr.outputs.pr_name }} steps: - uses: actions/checkout@v3 + - name: New branch - run: git checkout -b 
update-runner-$LATEST_VERSION + run: git checkout -b update-runner-"$(date +%Y-%m-%d)" + - name: Update files run: | - sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION - sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile - sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile - sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go - sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e-test-linux-vm.yaml + sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/VERSION + sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/Makefile + sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" Makefile + sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" test/e2e/e2e_test.go + + sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/VERSION + sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/Makefile + sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" Makefile + sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" test/e2e/e2e_test.go - name: Commit changes run: | diff --git a/runner/Makefile b/runner/Makefile index d404ab4843..a6ac424606 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) -RUNNER_VERSION ?= 2.304.0 +RUNNER_VERSION ?= 2.304.0 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0 DOCKER_VERSION ?= 20.10.23 diff --git a/runner/VERSION b/runner/VERSION index 971440973d..591bddba99 100644 --- a/runner/VERSION +++ b/runner/VERSION @@ -1 +1,2 @@ -2.304.0 +RUNNER_VERSION=2.304.0 +RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 \ No newline at end of file diff --git a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile index 33d3c3d22e..f8875bc9e2 100644 --- 
a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM ARG RUNNER_VERSION -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 +ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ENV CHANNEL=stable ARG DOCKER_COMPOSE_VERSION=v2.16.0 diff --git a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile index 3e35d183fd..06621bd7a6 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM ARG RUNNER_VERSION -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 +ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ENV CHANNEL=stable ARG DOCKER_COMPOSE_VERSION=v2.16.0 diff --git a/runner/actions-runner-dind.ubuntu-20.04.dockerfile b/runner/actions-runner-dind.ubuntu-20.04.dockerfile index 053ccc1c1b..c69d3e0903 100644 --- a/runner/actions-runner-dind.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-20.04.dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM ARG RUNNER_VERSION -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 +ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.23 diff --git a/runner/actions-runner-dind.ubuntu-22.04.dockerfile b/runner/actions-runner-dind.ubuntu-22.04.dockerfile index 6ee33dd236..03ee37a2a1 100644 --- a/runner/actions-runner-dind.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-22.04.dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM ARG RUNNER_VERSION -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 +ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.23 diff --git a/runner/actions-runner.ubuntu-20.04.dockerfile 
b/runner/actions-runner.ubuntu-20.04.dockerfile index 83d55bbab6..a5c7d0a40c 100644 --- a/runner/actions-runner.ubuntu-20.04.dockerfile +++ b/runner/actions-runner.ubuntu-20.04.dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:20.04 ARG TARGETPLATFORM ARG RUNNER_VERSION -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 +ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.23 diff --git a/runner/actions-runner.ubuntu-22.04.dockerfile b/runner/actions-runner.ubuntu-22.04.dockerfile index b0e5fcea2e..82a43d2ca3 100644 --- a/runner/actions-runner.ubuntu-22.04.dockerfile +++ b/runner/actions-runner.ubuntu-22.04.dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:22.04 ARG TARGETPLATFORM ARG RUNNER_VERSION -ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 +ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.23 From c9385e24ced9e3dc81c23bcb8f6e6c35d0c2bc49 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 12 May 2023 05:55:09 -0400 Subject: [PATCH 207/561] Updates: container-hooks to v0.3.1 (#2580) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- runner/Makefile | 2 +- runner/VERSION | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/runner/Makefile b/runner/Makefile index a6ac424606..fd70582c58 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -7,7 +7,7 @@ OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) RUNNER_VERSION ?= 2.304.0 -RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0 +RUNNER_CONTAINER_HOOKS_VERSION ?= 0.3.1 DOCKER_VERSION ?= 20.10.23 # default list of platforms for which multiarch image is built diff --git a/runner/VERSION b/runner/VERSION index 591bddba99..f10fec25cc 100644 --- a/runner/VERSION +++ b/runner/VERSION @@ -1,2 +1,2 @@ RUNNER_VERSION=2.304.0 -RUNNER_CONTAINER_HOOKS_VERSION=0.2.0 \ No newline at end of file 
+RUNNER_CONTAINER_HOOKS_VERSION=0.3.1 \ No newline at end of file From 250e7596db80e584b3b017ee69cd5fc2a91519b4 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Fri, 12 May 2023 22:10:59 +0900 Subject: [PATCH 208/561] Bump chart version to v0.23.3 for ARC v0.27.4 (#2577) --- charts/actions-runner-controller/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/actions-runner-controller/Chart.yaml b/charts/actions-runner-controller/Chart.yaml index a490cd63f4..90ac6023a0 100644 --- a/charts/actions-runner-controller/Chart.yaml +++ b/charts/actions-runner-controller/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.23.2 +version: 0.23.3 # Used as the default manager tag value when no tag property is provided in the values.yaml -appVersion: 0.27.3 +appVersion: 0.27.4 home: https://github.com/actions/actions-runner-controller From 30807de6d4a5091b933e09ae14fb01938b04a51e Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 15 May 2023 14:31:18 +0200 Subject: [PATCH 209/561] Apply naming convention to workflows (#2581) Co-authored-by: John Sudol <24583161+johnsudol@users.noreply.github.com> --- ...lish-chart.yaml => arc-publish-chart.yaml} | 5 ++-- .../{publish-arc.yaml => arc-publish.yaml} | 2 +- ...-runners.yaml => arc-release-runners.yaml} | 4 +-- ...yaml => arc-update-runners-scheduled.yaml} | 2 +- ...ate-chart.yaml => arc-validate-chart.yaml} | 4 +-- ...runners.yaml => arc-validate-runners.yaml} | 2 +- ...-test-linux-vm.yaml => gha-e2e-tests.yaml} | 2 +- ...-scale-set.yaml => gha-publish-chart.yaml} | 4 +-- ...gha-chart.yaml => gha-validate-chart.yaml} | 6 ++-- ...canary.yaml => global-publish-canary.yaml} | 28 +++++++++---------- 
...run-codeql.yaml => global-run-codeql.yaml} | 0 ...yaml => global-run-first-interaction.yaml} | 2 +- .../{run-stale.yaml => global-run-stale.yaml} | 0 .../2023-02-02-automate-runner-updates.md | 4 +-- docs/adrs/2023-03-17-workflow-improvements.md | 2 +- 15 files changed, 34 insertions(+), 33 deletions(-) rename .github/workflows/{publish-chart.yaml => arc-publish-chart.yaml} (98%) rename .github/workflows/{publish-arc.yaml => arc-publish.yaml} (99%) rename .github/workflows/{release-runners.yaml => arc-release-runners.yaml} (97%) rename .github/workflows/{update-runners.yaml => arc-update-runners-scheduled.yaml} (99%) rename .github/workflows/{validate-chart.yaml => arc-validate-chart.yaml} (96%) rename .github/workflows/{validate-runners.yaml => arc-validate-runners.yaml} (97%) rename .github/workflows/{e2e-test-linux-vm.yaml => gha-e2e-tests.yaml} (99%) rename .github/workflows/{publish-runner-scale-set.yaml => gha-publish-chart.yaml} (97%) rename .github/workflows/{validate-gha-chart.yaml => gha-validate-chart.yaml} (95%) rename .github/workflows/{publish-canary.yaml => global-publish-canary.yaml} (86%) rename .github/workflows/{run-codeql.yaml => global-run-codeql.yaml} (100%) rename .github/workflows/{run-first-interaction.yaml => global-run-first-interaction.yaml} (97%) rename .github/workflows/{run-stale.yaml => global-run-stale.yaml} (100%) diff --git a/.github/workflows/publish-chart.yaml b/.github/workflows/arc-publish-chart.yaml similarity index 98% rename from .github/workflows/publish-chart.yaml rename to .github/workflows/arc-publish-chart.yaml index 7e98a765b4..54785bbcec 100644 --- a/.github/workflows/publish-chart.yaml +++ b/.github/workflows/arc-publish-chart.yaml @@ -1,4 +1,4 @@ -name: Publish Helm Chart +name: Publish ARC Helm Charts # Revert to https://github.com/actions-runner-controller/releases#releases # for details on why we use this approach @@ -8,7 +8,7 @@ on: - master paths: - 'charts/**' - - '.github/workflows/publish-chart.yaml' + - 
'.github/workflows/arc-publish-chart.yaml' - '!charts/actions-runner-controller/docs/**' - '!charts/gha-runner-scale-set-controller/**' - '!charts/gha-runner-scale-set/**' @@ -171,6 +171,7 @@ jobs: --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ --index-path ${{ github.workspace }}/index.yaml \ + --token ${{ secrets.GITHUB_TOKEN }} \ --push \ --pages-branch 'gh-pages' \ --pages-index-path 'index.yaml' diff --git a/.github/workflows/publish-arc.yaml b/.github/workflows/arc-publish.yaml similarity index 99% rename from .github/workflows/publish-arc.yaml rename to .github/workflows/arc-publish.yaml index 4c18c255d6..fb23500ddd 100644 --- a/.github/workflows/publish-arc.yaml +++ b/.github/workflows/arc-publish.yaml @@ -1,4 +1,4 @@ -name: Publish ARC +name: Publish ARC Image # Revert to https://github.com/actions-runner-controller/releases#releases # for details on why we use this approach diff --git a/.github/workflows/release-runners.yaml b/.github/workflows/arc-release-runners.yaml similarity index 97% rename from .github/workflows/release-runners.yaml rename to .github/workflows/arc-release-runners.yaml index ca39e185ad..8adad96abf 100644 --- a/.github/workflows/release-runners.yaml +++ b/.github/workflows/arc-release-runners.yaml @@ -1,4 +1,4 @@ -name: Release Runner Images +name: Release ARC Runner Images # Revert to https://github.com/actions-runner-controller/releases#releases # for details on why we use this approach @@ -10,7 +10,7 @@ on: - 'master' paths: - 'runner/VERSION' - - '.github/workflows/release-runners.yaml' + - '.github/workflows/arc-release-runners.yaml' env: # Safeguard to prevent pushing images to registeries after build diff --git a/.github/workflows/update-runners.yaml b/.github/workflows/arc-update-runners-scheduled.yaml similarity index 99% rename from .github/workflows/update-runners.yaml rename to .github/workflows/arc-update-runners-scheduled.yaml index 
1fc896ec91..1fb1153b4e 100644 --- a/.github/workflows/update-runners.yaml +++ b/.github/workflows/arc-update-runners-scheduled.yaml @@ -1,6 +1,6 @@ # This workflows polls releases from actions/runner and in case of a new one it # updates files containing runner version and opens a pull request. -name: Update runners +name: Runner Updates Check (Scheduled Job) on: schedule: diff --git a/.github/workflows/validate-chart.yaml b/.github/workflows/arc-validate-chart.yaml similarity index 96% rename from .github/workflows/validate-chart.yaml rename to .github/workflows/arc-validate-chart.yaml index 5475649cbf..6adccc3e65 100644 --- a/.github/workflows/validate-chart.yaml +++ b/.github/workflows/arc-validate-chart.yaml @@ -6,7 +6,7 @@ on: - master paths: - 'charts/**' - - '.github/workflows/validate-chart.yaml' + - '.github/workflows/arc-validate-chart.yaml' - '!charts/actions-runner-controller/docs/**' - '!**.md' - '!charts/gha-runner-scale-set-controller/**' @@ -14,7 +14,7 @@ on: push: paths: - 'charts/**' - - '.github/workflows/validate-chart.yaml' + - '.github/workflows/arc-validate-chart.yaml' - '!charts/actions-runner-controller/docs/**' - '!**.md' - '!charts/gha-runner-scale-set-controller/**' diff --git a/.github/workflows/validate-runners.yaml b/.github/workflows/arc-validate-runners.yaml similarity index 97% rename from .github/workflows/validate-runners.yaml rename to .github/workflows/arc-validate-runners.yaml index cab0eb7072..42380e911d 100644 --- a/.github/workflows/validate-runners.yaml +++ b/.github/workflows/arc-validate-runners.yaml @@ -1,4 +1,4 @@ -name: Validate Runners +name: Validate ARC Runners on: pull_request: diff --git a/.github/workflows/e2e-test-linux-vm.yaml b/.github/workflows/gha-e2e-tests.yaml similarity index 99% rename from .github/workflows/e2e-test-linux-vm.yaml rename to .github/workflows/gha-e2e-tests.yaml index 35dafedd41..65230a2e19 100644 --- a/.github/workflows/e2e-test-linux-vm.yaml +++ b/.github/workflows/gha-e2e-tests.yaml @@ 
-1,4 +1,4 @@ -name: CI ARC E2E Linux VM Test +name: (gha) E2E Tests on: push: diff --git a/.github/workflows/publish-runner-scale-set.yaml b/.github/workflows/gha-publish-chart.yaml similarity index 97% rename from .github/workflows/publish-runner-scale-set.yaml rename to .github/workflows/gha-publish-chart.yaml index 12029e9a82..f0e0107775 100644 --- a/.github/workflows/publish-runner-scale-set.yaml +++ b/.github/workflows/gha-publish-chart.yaml @@ -1,4 +1,4 @@ -name: Publish Runner Scale Set Controller Charts +name: (gha) Publish Helm Charts on: workflow_dispatch: @@ -101,7 +101,7 @@ jobs: - name: Job summary run: | - echo "The [publish-runner-scale-set.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/publish-runner-scale-set.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY + echo "The [gha-publish-chart.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/gha-publish-chart.yaml) workflow run was completed successfully!" 
>> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/validate-gha-chart.yaml b/.github/workflows/gha-validate-chart.yaml similarity index 95% rename from .github/workflows/validate-gha-chart.yaml rename to .github/workflows/gha-validate-chart.yaml index 645b32e9d7..5b1b456727 100644 --- a/.github/workflows/validate-gha-chart.yaml +++ b/.github/workflows/gha-validate-chart.yaml @@ -1,4 +1,4 @@ -name: Validate Helm Chart (gha-runner-scale-set-controller and gha-runner-scale-set) +name: (gha) Validate Helm Charts on: pull_request: @@ -6,13 +6,13 @@ on: - master paths: - 'charts/**' - - '.github/workflows/validate-gha-chart.yaml' + - '.github/workflows/gha-validate-chart.yaml' - '!charts/actions-runner-controller/**' - '!**.md' push: paths: - 'charts/**' - - '.github/workflows/validate-gha-chart.yaml' + - '.github/workflows/gha-validate-chart.yaml' - '!charts/actions-runner-controller/**' - '!**.md' workflow_dispatch: diff --git a/.github/workflows/publish-canary.yaml b/.github/workflows/global-publish-canary.yaml similarity index 86% rename from .github/workflows/publish-canary.yaml rename to .github/workflows/global-publish-canary.yaml index 984cd52325..27579084b8 100644 --- a/.github/workflows/publish-canary.yaml +++ b/.github/workflows/global-publish-canary.yaml @@ -1,4 +1,4 @@ -name: Publish Canary Image +name: Publish Canary Images # Revert to https://github.com/actions-runner-controller/releases#releases # for details on why we use this approach @@ -11,19 +11,19 @@ on: - '.github/actions/**' - '.github/ISSUE_TEMPLATE/**' - '.github/workflows/e2e-test-dispatch-workflow.yaml' - - '.github/workflows/e2e-test-linux-vm.yaml' - - '.github/workflows/publish-arc.yaml' - - '.github/workflows/publish-chart.yaml' - - '.github/workflows/publish-runner-scale-set.yaml' - - 
'.github/workflows/release-runners.yaml' - - '.github/workflows/run-codeql.yaml' - - '.github/workflows/run-first-interaction.yaml' - - '.github/workflows/run-stale.yaml' - - '.github/workflows/update-runners.yaml' + - '.github/workflows/gha-e2e-tests.yaml' + - '.github/workflows/arc-publish.yaml' + - '.github/workflows/arc-publish-chart.yaml' + - '.github/workflows/gha-publish-chart.yaml' + - '.github/workflows/arc-release-runners.yaml' + - '.github/workflows/global-run-codeql.yaml' + - '.github/workflows/global-run-first-interaction.yaml' + - '.github/workflows/global-run-stale.yaml' + - '.github/workflows/arc-update-runners-scheduled.yaml' - '.github/workflows/validate-arc.yaml' - - '.github/workflows/validate-chart.yaml' - - '.github/workflows/validate-gha-chart.yaml' - - '.github/workflows/validate-runners.yaml' + - '.github/workflows/arc-validate-chart.yaml' + - '.github/workflows/gha-validate-chart.yaml' + - '.github/workflows/arc-validate-runners.yaml' - '.github/dependabot.yml' - '.github/RELEASE_NOTE_TEMPLATE.md' - 'runner/**' @@ -126,4 +126,4 @@ jobs: ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary-${{ steps.resolve_parameters.outputs.short_sha }} cache-from: type=gha - cache-to: type=gha,mode=max \ No newline at end of file + cache-to: type=gha,mode=max diff --git a/.github/workflows/run-codeql.yaml b/.github/workflows/global-run-codeql.yaml similarity index 100% rename from .github/workflows/run-codeql.yaml rename to .github/workflows/global-run-codeql.yaml diff --git a/.github/workflows/run-first-interaction.yaml b/.github/workflows/global-run-first-interaction.yaml similarity index 97% rename from .github/workflows/run-first-interaction.yaml rename to .github/workflows/global-run-first-interaction.yaml index 908deccb85..ce1139a581 100644 --- a/.github/workflows/run-first-interaction.yaml +++ 
b/.github/workflows/global-run-first-interaction.yaml @@ -1,4 +1,4 @@ -name: first-interaction +name: First Interaction on: issues: diff --git a/.github/workflows/run-stale.yaml b/.github/workflows/global-run-stale.yaml similarity index 100% rename from .github/workflows/run-stale.yaml rename to .github/workflows/global-run-stale.yaml diff --git a/docs/adrs/2023-02-02-automate-runner-updates.md b/docs/adrs/2023-02-02-automate-runner-updates.md index c3bb5c4df4..7e0f7f0cd6 100644 --- a/docs/adrs/2023-02-02-automate-runner-updates.md +++ b/docs/adrs/2023-02-02-automate-runner-updates.md @@ -10,7 +10,7 @@ When a new [runner](https://github.com/actions/runner) version is released, new images need to be built in [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases). This is currently started by the -[release-runners](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/release-runners.yaml) +[release-runners](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/arc-release-runners.yaml) workflow, although this only starts when the set of file containing the runner version is updated (and this is currently done manually). @@ -19,7 +19,7 @@ version is updated (and this is currently done manually). 
We can have another workflow running on a cadence (hourly seems sensible) and checking for new runner releases, creating a PR updating `RUNNER_VERSION` in: -- `.github/workflows/release-runners.yaml` +- `.github/workflows/arc-release-runners.yaml` - `Makefile` - `runner/Makefile` - `test/e2e/e2e_test.go` diff --git a/docs/adrs/2023-03-17-workflow-improvements.md b/docs/adrs/2023-03-17-workflow-improvements.md index 38d611aa24..f85bd9b9a2 100644 --- a/docs/adrs/2023-03-17-workflow-improvements.md +++ b/docs/adrs/2023-03-17-workflow-improvements.md @@ -26,7 +26,7 @@ At the moment we have three workflows that validate Go code: - [Validate ARC](https://github.com/actions/actions-runner-controller/blob/01e9dd3/.github/workflows/validate-arc.yaml): this is a bit of a catch-all workflow, other than Go tests this also validates Kubernetes manifests, runs `go generate`, `go fmt` and `go vet` -- [Run CodeQL](https://github.com/actions/actions-runner-controller/blob/a095f0b66aad5fbc8aa8d7032f3299233e4c84d2/.github/workflows/run-codeql.yaml) +- [Run CodeQL](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/global-run-codeql.yaml) ### Proposal From 0649ff3de4878ad7d8420e92917cf1b6e47892ad Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 15 May 2023 16:12:03 +0200 Subject: [PATCH 210/561] Fix broken chart validation workflows (#2589) --- .github/workflows/arc-validate-chart.yaml | 2 +- .github/workflows/gha-validate-chart.yaml | 18 +----------------- .github/workflows/go.yaml | 9 +++++---- 3 files changed, 7 insertions(+), 22 deletions(-) diff --git a/.github/workflows/arc-validate-chart.yaml b/.github/workflows/arc-validate-chart.yaml index 6adccc3e65..ed3b3ac37f 100644 --- a/.github/workflows/arc-validate-chart.yaml +++ b/.github/workflows/arc-validate-chart.yaml @@ -65,7 +65,7 @@ jobs: python-version: '3.7' - name: Set up chart-testing - uses: helm/chart-testing-action@v2.3.1 + uses: 
helm/chart-testing-action@v2.4.0 - name: Run chart-testing (list-changed) id: list-changed diff --git a/.github/workflows/gha-validate-chart.yaml b/.github/workflows/gha-validate-chart.yaml index 5b1b456727..d616d9ac62 100644 --- a/.github/workflows/gha-validate-chart.yaml +++ b/.github/workflows/gha-validate-chart.yaml @@ -61,23 +61,7 @@ jobs: python-version: '3.7' - name: Set up chart-testing - uses: helm/chart-testing-action@v2.3.1 - - - name: Set up latest version chart-testing - run: | - echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list - sudo apt update - sudo apt install goreleaser - git clone https://github.com/helm/chart-testing - cd chart-testing - unset CT_CONFIG_DIR - goreleaser build --clean --skip-validate - ./dist/chart-testing_linux_amd64_v1/ct version - echo 'Adding ct directory to PATH...' - echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH" - echo 'Setting CT_CONFIG_DIR...' - echo "CT_CONFIG_DIR=$RUNNER_TEMP/chart-testing/etc" >> "$GITHUB_ENV" - working-directory: ${{ runner.temp }} + uses: helm/chart-testing-action@v2.4.0 - name: Run chart-testing (list-changed) id: list-changed diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml index 153bf8b3e3..6c13bac1be 100644 --- a/.github/workflows/go.yaml +++ b/.github/workflows/go.yaml @@ -8,7 +8,6 @@ on: - '**.go' - 'go.mod' - 'go.sum' - pull_request: paths: - '.github/workflows/go.yaml' @@ -72,9 +71,11 @@ jobs: run: git diff --exit-code - name: Install kubebuilder run: | - curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz - tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz - sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder + curl -D headers.txt -fsL "https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.26.1-linux-amd64.tar.gz" -o kubebuilder-tools + echo "$(grep -i etag headers.txt -m 1 | cut -d'"' 
-f2) kubebuilder-tools" > sum + md5sum -c sum + tar -zvxf kubebuilder-tools + sudo mv kubebuilder /usr/local/ - name: Run go tests run: | go test -short `go list ./... | grep -v ./test_e2e_arc` From 0a2f5fef35a7a29c12aee110434e3c0ac05e8698 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 17 May 2023 05:57:23 -0400 Subject: [PATCH 211/561] Updates: container-hooks to v0.3.2 (#2597) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- runner/Makefile | 2 +- runner/VERSION | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/runner/Makefile b/runner/Makefile index fd70582c58..e57da33fa6 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -7,7 +7,7 @@ OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) RUNNER_VERSION ?= 2.304.0 -RUNNER_CONTAINER_HOOKS_VERSION ?= 0.3.1 +RUNNER_CONTAINER_HOOKS_VERSION ?= 0.3.2 DOCKER_VERSION ?= 20.10.23 # default list of platforms for which multiarch image is built diff --git a/runner/VERSION b/runner/VERSION index f10fec25cc..97c625c545 100644 --- a/runner/VERSION +++ b/runner/VERSION @@ -1,2 +1,2 @@ RUNNER_VERSION=2.304.0 -RUNNER_CONTAINER_HOOKS_VERSION=0.3.1 \ No newline at end of file +RUNNER_CONTAINER_HOOKS_VERSION=0.3.2 \ No newline at end of file From 53a39773fb287b79ed60b2829ea84af9309734bd Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Wed, 17 May 2023 13:42:35 +0200 Subject: [PATCH 212/561] Update CONTRIBUTING.md with new contribution guidelines and release process documentation (#2596) Co-authored-by: John Sudol <24583161+johnsudol@users.noreply.github.com> --- .gitattributes | 1 + CONTRIBUTING.md | 164 +++++++++++++++++- .../gha-runner-scale-set-controller/README.md | 12 +- .../thumbnail.png | 3 + 4 files changed, 167 insertions(+), 13 deletions(-) create mode 100644 .gitattributes create mode 100644 
docs/preview/gha-runner-scale-set-controller/thumbnail.png diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..24a8e87939 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.png filter=lfs diff=lfs merge=lfs -text diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2399a8ce11..9207c3d920 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,6 +15,13 @@ - [Opening the Pull Request](#opening-the-pull-request) - [Helm Version Changes](#helm-version-changes) - [Testing Controller Built from a Pull Request](#testing-controller-built-from-a-pull-request) + - [Release process](#release-process) + - [Workflow structure](#workflow-structure) + - [Releasing legacy actions-runner-controller image and helm charts](#releasing-legacy-actions-runner-controller-image-and-helm-charts) + - [Release actions-runner-controller runner images](#release-actions-runner-controller-runner-images) + - [Release gha-runner-scale-set-controller image and helm charts](#release-gha-runner-scale-set-controller-image-and-helm-charts) + - [Release actions/runner image](#release-actionsrunner-image) + - [Canary releases](#canary-releases) ## Welcome @@ -25,14 +32,13 @@ reviewed and merged. ## Before contributing code -We welcome code patches, but to make sure things are well coordinated you should discuss any significant change before starting the work. -The maintainers ask that you signal your intention to contribute to the project using the issue tracker. -If there is an existing issue that you want to work on, please let us know so we can get it assigned to you. -If you noticed a bug or want to add a new feature, there are issue templates you can fill out. +We welcome code patches, but to make sure things are well coordinated you should discuss any significant change before starting the work. The maintainers ask that you signal your intention to contribute to the project using the issue tracker. 
If there is an existing issue that you want to work on, please let us know so we can get it assigned to you. If you noticed a bug or want to add a new feature, there are issue templates you can fill out. When filing a feature request, the maintainers will review the change and give you a decision on whether we are willing to accept the feature into the project. + For significantly large and/or complex features, we may request that you write up an architectural decision record ([ADR](https://github.blog/2020-08-13-why-write-adrs/)) detailing the change. -Please use the [template](/adrs/0000-TEMPLATE.md) as guidance. + +Please use the [template](/docs/adrs/yyyy-mm-dd-TEMPLATE) as guidance. workflow_a["arc-publish.yaml"] + event_b{{"workflow_dispatch"}} -- triggers --> workflow_a["arc-publish.yaml"] + workflow_a["arc-publish.yaml"] -- uploads --> package["actions-runner-controller.tar.gz"] + end + subgraph repository: actions-runner-controller/releases + workflow_a["arc-publish.yaml"] -- triggers --> event_d{{"repository_dispatch"}} --> workflow_b["publish-arc.yaml"] + workflow_b["publish-arc.yaml"] -- push --> A["GHCR: \nactions-runner-controller/actions-runner-controller:*"] + workflow_b["publish-arc.yaml"] -- push --> B["DockerHub: \nsummerwind/actions-runner-controller:*"] + end +``` + +#### Release actions-runner-controller runner images + +**Manual steps:** + +1. Navigate to the [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases) repository +2. Trigger [the release-runners.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml) workflow. + 1. 
The list of input prameters for this workflow is defined in the table below (always inspect the workflow file for the latest version) + + +| Parameter | Description | Default | +|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| `runner_version` | The version of the [actions/runner](https://github.com/actions/runner) to use | `2.300.2` | +| `docker_version` | The version of docker to use | `20.10.12` | +| `runner_container_hooks_version` | The version of [actions/runner-container-hooks](https://github.com/actions/runner-container-hooks) to use | `0.2.0` | +| `sha` | The commit sha from [actions/actions-runner-controller](https://github.com/actions/actions-runner-controller) to be used to build the runner images. This will be provided to `actions/checkout` & used to tag the container images | Empty string. | +| `push_to_registries` | Whether to push the images to the registries. 
Use false to test the build | false | + +**Automated steps:** + +```mermaid +flowchart LR + workflow["release-runners.yaml"] -- workflow_dispatch* --> workflow_b["release-runners.yaml"] + subgraph repository: actions/actions-runner-controller + runner_updates_check["arc-update-runners-scheduled.yaml"] -- "polls (daily)" --> runner_releases["actions/runner/releases"] + runner_updates_check -- creates --> runner_update_pr["PR: update /runner/VERSION"]**** + runner_update_pr --> runner_update_pr_merge{{"merge"}} + runner_update_pr_merge -- triggers --> workflow["release-runners.yaml"] + end + subgraph repository: actions-runner-controller/releases + workflow_b["release-runners.yaml"] -- push --> A["GHCR: \n actions-runner-controller/actions-runner:* \n actions-runner-controller/actions-runner-dind:* \n actions-runner-controller/actions-runner-dind-rootless:*"] + workflow_b["release-runners.yaml"] -- push --> B["DockerHub: \n summerwind/actions-runner:* \n summerwind/actions-runner-dind:* \n summerwind/actions-runner-dind-rootless:*"] + event_b{{"workflow_dispatch"}} -- triggers --> workflow_b["release-runners.yaml"] + end +``` + +#### Release gha-runner-scale-set-controller image and helm charts + +1. Make sure the master branch is stable and all CI jobs are passing +1. Prepare a release PR (example: ) + 1. Bump up the version of the chart in: charts/gha-runner-scale-set-controller/Chart.yaml + 2. Bump up the version of the chart in: charts/gha-runner-scale-set/Chart.yaml + 1. Make sure that `version`, `appVersion` of both charts are always the same. These versions cannot diverge. + 3. Update the quickstart guide to reflect the latest versions: docs/preview/gha-runner-scale-set-controller/README.md + 4. Add changelog to the PR as well as the quickstart guide +1. Merge the release PR +1. Manually trigger the [(gha) Publish Helm Charts](https://github.com/actions/actions-runner-controller/actions/workflows/gha-publish-chart.yaml) workflow +1. 
Manually create a tag and release in [actions/actions-runner-controller](https://github.com/actions/actions-runner-controller/releases) with the format: `gha-runner-scale-set-x.x.x` where the version (x.x.x) matches that of the Helm chart + +| Parameter | Description | Default | +|-------------------------------------------------|--------------------------------------------------------------------------------------------------------|----------------| +| `ref` | The branch, tag or SHA to cut a release from. | default branch | +| `release_tag_name` | The tag of the controller image. This is not a git tag. | canary | +| `push_to_registries` | Push images to registries. Use false to test the build process. | false | +| `publish_gha_runner_scale_set_controller_chart` | Publish new helm chart for gha-runner-scale-set-controller. This will push the new OCI archive to GHCR | false | +| `publish_gha_runner_scale_set_chart` | Publish new helm chart for gha-runner-scale-set. This will push the new OCI archive to GHCR | false | + +#### Release actions/runner image + +A new runner image is built and published to whenever a new runner binary has been released. There's nothing to do here. + +#### Canary releases + +We publish canary images for both the legacy actions-runner-controller and gha-runner-scale-set-controller images. + +```mermaid +flowchart LR + subgraph org: actions + event_a{{"push: [master]"}} -- triggers --> workflow_a["publish-canary.yaml"] + end + subgraph org: actions-runner-controller + workflow_a["publish-canary.yaml"] -- triggers --> event_d{{"repository_dispatch"}} --> workflow_b["publish-canary.yaml"] + workflow_b["publish-canary.yaml"] -- push --> A["GHCR: \nactions-runner-controller/actions-runner-controller:canary"] + workflow_b["publish-canary.yaml"] -- push --> B["DockerHub: \nsummerwind/actions-runner-controller:canary"] + end +``` + +1. 
[actions-runner-controller canary image](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller) +2. [gha-runner-scale-set-controller image](https://github.com/actions/actions-runner-controller/pkgs/container/gha-runner-scale-set-controller) + +These canary images are automatically built and released on each push to the master branch. diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index 6c7839cede..3c35e17bbf 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -16,7 +16,9 @@ In addition to the increased reliability of the automatic scaling, we have worke ### Demo -[![Watch the walkthrough](https://img.youtube.com/vi/wQ0k5k6KW5Y/hqdefault.jpg)](https://youtu.be/wQ0k5k6KW5Y) +[![Watch the walkthrough](./thumbnail.png)](https://youtu.be/wQ0k5k6KW5Y) + +> Will take you to Youtube for a short walkthrough of the Autoscaling Runner Scale Sets mode. ## Setup @@ -35,8 +37,7 @@ In addition to the increased reliability of the automatic scaling, we have worke helm install arc \ --namespace "${NAMESPACE}" \ --create-namespace \ - oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \ - --version 0.4.0 + oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller ``` 1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app). 
@@ -57,7 +58,7 @@ In addition to the increased reliability of the automatic scaling, we have worke --create-namespace \ --set githubConfigUrl="${GITHUB_CONFIG_URL}" \ --set githubConfigSecret.github_token="${GITHUB_PAT}" \ - oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0 + oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set ``` ```bash @@ -75,7 +76,7 @@ In addition to the increased reliability of the automatic scaling, we have worke --set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \ --set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \ --set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \ - oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0 + oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set ``` 1. Check your installation. If everything went well, you should see the following: @@ -139,7 +140,6 @@ Upgrading actions-runner-controller requires a few extra steps because CRDs will ```bash helm pull oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \ - --version 0.4.0 \ --untar && \ kubectl replace -f /gha-runner-scale-set-controller/crds/ ``` diff --git a/docs/preview/gha-runner-scale-set-controller/thumbnail.png b/docs/preview/gha-runner-scale-set-controller/thumbnail.png new file mode 100644 index 0000000000..1b718e39e0 --- /dev/null +++ b/docs/preview/gha-runner-scale-set-controller/thumbnail.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ed81a93a5e62dee54a47fbe7274462a80d3d39deff8bd396cf4065e3cf5b93f +size 1556557 From 6e40eceb352039fed6e612f61f6ce2f37049babc Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Wed, 17 May 2023 14:36:16 +0200 Subject: [PATCH 213/561] Add new architecture diagram (#2598) --- .../gha-runner-scale-set-controller/README.md | 17 
+++++++++++++++-- .../arc-diagram-dark.png | 3 +++ .../arc-diagram-light.png | 3 +++ 3 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 docs/preview/gha-runner-scale-set-controller/arc-diagram-dark.png create mode 100644 docs/preview/gha-runner-scale-set-controller/arc-diagram-light.png diff --git a/docs/preview/gha-runner-scale-set-controller/README.md b/docs/preview/gha-runner-scale-set-controller/README.md index 3c35e17bbf..56baf8ffc6 100644 --- a/docs/preview/gha-runner-scale-set-controller/README.md +++ b/docs/preview/gha-runner-scale-set-controller/README.md @@ -4,7 +4,20 @@ This new autoscaling mode brings numerous enhancements (described in the followi ## How it works -![arc_hld_v1 drawio (1)](https://user-images.githubusercontent.com/568794/212665433-2d1f3d6e-0ba8-4f02-9d1b-27d00c49abd1.png) +![ARC architecture diagram](arc-diagram-light.png#gh-light-mode-only) +![ARC architecture diagram](arc-diagram-dark.png#gh-dark-mode-only) + +1. ARC is installed using the supplied Helm charts, and the controller manager pod is deployed in the specified namespace. A new `AutoScalingRunnerSet` resource is deployed via the supplied Helm charts or a customized manifest file. The `AutoScalingRunnerSet` controller calls GitHub's APIs to fetch the runner group ID that the runner scale set will belong to. +2. The `AutoScalingRunnerSet` controller calls the APIs one more time to either fetch or create a runner scale set in the `Actions Service` before creating the `Runner ScaleSet Listener` resource. +3. A `Runner ScaleSet Listener` pod is deployed by the `AutoScaling Listener Controller`. In this pod, the listener application connects to the `Actions Service` to authenticate and establish a long poll HTTPS connection. The listener stays idle until it receives a `Job Available` message from the `Actions Service`. +4. 
When a workflow run is triggered from a repository, the `Actions Service` dispatches individual job runs to the runners or runner scalesets where the `runs-on` property matches the name of the runner scaleset or labels of self-hosted runners. +5. When the `Runner ScaleSet Listener` receives the `Job Available` message, it checks whether it can scale up to the desired count. If it can, the `Runner ScaleSet Listener` acknowledges the message. +6. The `Runner ScaleSet Listener` uses a `Service Account` and a `Role` bound to that account to make an HTTPS call through the Kubernetes APIs to patch the `EphemeralRunner Set` resource with the desired replicas count. +7. The `EphemeralRunner Set` attempts to create new runners and the `EphemeralRunner Controller` requests a JIT configuration token to register these runners. The controller attempts to create runner pods. If the pod's status is `failed`, the controller retries up to 5 times. After 24 hours the `Actions Service` unassigns the job if no runner accepts it. +8. Once the runner pod is created, the runner application in the pod uses the JIT configuration token to register itself with the `Actions Service`. It then establishes another HTTPS long poll connection to receive the job details it needs to execute. +9. The `Actions Service` acknowledges the runner registration and dispatches the job run details. +10. Throughout the job run execution, the runner continuously communicates the logs and job run status back to the `Actions Service`. +11. When the runner completes its job successfully, the `EphemeralRunner Controller` checks with the `Actions Service` to see if the runner can be deleted. If it can, the `Ephemeral RunnerSet` deletes the runner. 
In addition to the increased reliability of the automatic scaling, we have worked on these improvements: @@ -16,7 +29,7 @@ In addition to the increased reliability of the automatic scaling, we have worke ### Demo -[![Watch the walkthrough](./thumbnail.png)](https://youtu.be/wQ0k5k6KW5Y) +[![Watch the walkthrough](thumbnail.png)](https://youtu.be/wQ0k5k6KW5Y) > Will take you to Youtube for a short walkthrough of the Autoscaling Runner Scale Sets mode. diff --git a/docs/preview/gha-runner-scale-set-controller/arc-diagram-dark.png b/docs/preview/gha-runner-scale-set-controller/arc-diagram-dark.png new file mode 100644 index 0000000000..c1a0f811db --- /dev/null +++ b/docs/preview/gha-runner-scale-set-controller/arc-diagram-dark.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5eb13eaa0f0495f4f742a968072f5086f9eae4f8e2204e5b5c4a6c3ffabdf39a +size 1212398 diff --git a/docs/preview/gha-runner-scale-set-controller/arc-diagram-light.png b/docs/preview/gha-runner-scale-set-controller/arc-diagram-light.png new file mode 100644 index 0000000000..d37a281d60 --- /dev/null +++ b/docs/preview/gha-runner-scale-set-controller/arc-diagram-light.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbae2b58eb5636348560d9c4b3a06a92ed60a30276fc9f2ff452c37ce961b40a +size 801347 From 12866b563be401e592e52aa67dbd985da90715ad Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Thu, 18 May 2023 10:55:03 +0200 Subject: [PATCH 214/561] Add concurrency limits on all workflows to eliminate wasted cycles (#2603) --- .github/workflows/arc-publish-chart.yaml | 4 ++++ .github/workflows/arc-publish.yaml | 4 ++++ .github/workflows/arc-release-runners.yaml | 4 ++++ .github/workflows/arc-validate-chart.yaml | 7 +++++++ .github/workflows/arc-validate-runners.yaml | 7 +++++++ .github/workflows/gha-e2e-tests.yaml | 7 +++++++ .github/workflows/gha-publish-chart.yaml | 6 +++++- .github/workflows/gha-validate-chart.yaml | 
7 +++++++ .github/workflows/global-publish-canary.yaml | 4 ++++ .github/workflows/global-run-codeql.yaml | 7 +++++++ .github/workflows/go.yaml | 7 +++++++ 11 files changed, 63 insertions(+), 1 deletion(-) diff --git a/.github/workflows/arc-publish-chart.yaml b/.github/workflows/arc-publish-chart.yaml index 54785bbcec..e5c9ce1822 100644 --- a/.github/workflows/arc-publish-chart.yaml +++ b/.github/workflows/arc-publish-chart.yaml @@ -28,6 +28,10 @@ env: permissions: contents: write +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + jobs: lint-chart: name: Lint Chart diff --git a/.github/workflows/arc-publish.yaml b/.github/workflows/arc-publish.yaml index fb23500ddd..fa318c1a08 100644 --- a/.github/workflows/arc-publish.yaml +++ b/.github/workflows/arc-publish.yaml @@ -25,6 +25,10 @@ env: TARGET_ORG: actions-runner-controller TARGET_REPO: actions-runner-controller +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + jobs: release-controller: name: Release diff --git a/.github/workflows/arc-release-runners.yaml b/.github/workflows/arc-release-runners.yaml index 8adad96abf..211c129108 100644 --- a/.github/workflows/arc-release-runners.yaml +++ b/.github/workflows/arc-release-runners.yaml @@ -19,6 +19,10 @@ env: TARGET_WORKFLOW: release-runners.yaml DOCKER_VERSION: 20.10.23 +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + jobs: build-runners: name: Trigger Build and Push of Runner Images diff --git a/.github/workflows/arc-validate-chart.yaml b/.github/workflows/arc-validate-chart.yaml index ed3b3ac37f..c11ad22f35 100644 --- a/.github/workflows/arc-validate-chart.yaml +++ b/.github/workflows/arc-validate-chart.yaml @@ -27,6 +27,13 @@ env: permissions: contents: read +concurrency: + # This will make sure we only apply the concurrency limits on pull requests + # but not pushes to master branch by making the concurrency group name unique + # for pushes + group: ${{ github.head_ref || github.run_id }} + 
cancel-in-progress: true + jobs: validate-chart: name: Lint Chart diff --git a/.github/workflows/arc-validate-runners.yaml b/.github/workflows/arc-validate-runners.yaml index 42380e911d..0b43f39b37 100644 --- a/.github/workflows/arc-validate-runners.yaml +++ b/.github/workflows/arc-validate-runners.yaml @@ -12,6 +12,13 @@ on: permissions: contents: read +concurrency: + # This will make sure we only apply the concurrency limits on pull requests + # but not pushes to master branch by making the concurrency group name unique + # for pushes + group: ${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: shellcheck: name: runner / shellcheck diff --git a/.github/workflows/gha-e2e-tests.yaml b/.github/workflows/gha-e2e-tests.yaml index 65230a2e19..48ca58505c 100644 --- a/.github/workflows/gha-e2e-tests.yaml +++ b/.github/workflows/gha-e2e-tests.yaml @@ -18,6 +18,13 @@ env: IMAGE_NAME: "arc-test-image" IMAGE_VERSION: "0.4.0" +concurrency: + # This will make sure we only apply the concurrency limits on pull requests + # but not pushes to master branch by making the concurrency group name unique + # for pushes + group: ${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: default-setup: runs-on: ubuntu-latest diff --git a/.github/workflows/gha-publish-chart.yaml b/.github/workflows/gha-publish-chart.yaml index f0e0107775..8d893e0296 100644 --- a/.github/workflows/gha-publish-chart.yaml +++ b/.github/workflows/gha-publish-chart.yaml @@ -33,7 +33,11 @@ env: HELM_VERSION: v3.8.0 permissions: - packages: write + packages: write + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true jobs: build-push-image: diff --git a/.github/workflows/gha-validate-chart.yaml b/.github/workflows/gha-validate-chart.yaml index d616d9ac62..eb4f1f5e82 100644 --- a/.github/workflows/gha-validate-chart.yaml +++ b/.github/workflows/gha-validate-chart.yaml @@ -23,6 +23,13 @@ env: permissions: contents: read +concurrency: + # This will make 
sure we only apply the concurrency limits on pull requests + # but not pushes to master branch by making the concurrency group name unique + # for pushes + group: ${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: validate-chart: name: Lint Chart diff --git a/.github/workflows/global-publish-canary.yaml b/.github/workflows/global-publish-canary.yaml index 27579084b8..cf97a7104f 100644 --- a/.github/workflows/global-publish-canary.yaml +++ b/.github/workflows/global-publish-canary.yaml @@ -37,6 +37,10 @@ permissions: contents: read packages: write +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + env: # Safeguard to prevent pushing images to registeries after build PUSH_TO_REGISTRIES: true diff --git a/.github/workflows/global-run-codeql.yaml b/.github/workflows/global-run-codeql.yaml index 908e864b09..b13463d409 100644 --- a/.github/workflows/global-run-codeql.yaml +++ b/.github/workflows/global-run-codeql.yaml @@ -10,6 +10,13 @@ on: schedule: - cron: '30 1 * * 0' +concurrency: + # This will make sure we only apply the concurrency limits on pull requests + # but not pushes to master branch by making the concurrency group name unique + # for pushes + group: ${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: analyze: name: Analyze diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml index 6c13bac1be..21a783bac6 100644 --- a/.github/workflows/go.yaml +++ b/.github/workflows/go.yaml @@ -18,6 +18,13 @@ on: permissions: contents: read +concurrency: + # This will make sure we only apply the concurrency limits on pull requests + # but not pushes to master branch by making the concurrency group name unique + # for pushes + group: ${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: fmt: runs-on: ubuntu-latest From 7eff1ab186fac6e8726547c2deed1421aeacf06c Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Thu, 18 May 2023 14:15:05 +0200 Subject: [PATCH 215/561] Discard 
logs on helm chart tests (#2607) --- .../tests/template_test.go | 22 +++++++++ .../tests/template_test.go | 46 +++++++++++++++++++ go.mod | 6 +-- go.sum | 6 +++ 4 files changed, 77 insertions(+), 3 deletions(-) diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index a097b433ef..663e64b22a 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -8,6 +8,7 @@ import ( "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/logger" "github.com/gruntwork-io/terratest/modules/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -33,6 +34,7 @@ func TestTemplate_CreateServiceAccount(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "serviceAccount.create": "true", "serviceAccount.annotations.foo": "bar", @@ -61,6 +63,7 @@ func TestTemplate_CreateServiceAccount_OverwriteName(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "serviceAccount.create": "true", "serviceAccount.name": "overwritten-name", @@ -90,6 +93,7 @@ func TestTemplate_CreateServiceAccount_CannotUseDefaultServiceAccount(t *testing namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "serviceAccount.create": "true", "serviceAccount.name": "default", @@ -113,6 +117,7 @@ func TestTemplate_NotCreateServiceAccount(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "serviceAccount.create": "false", 
"serviceAccount.name": "overwritten-name", @@ -136,6 +141,7 @@ func TestTemplate_NotCreateServiceAccount_ServiceAccountNotSet(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "serviceAccount.create": "false", "serviceAccount.annotations.foo": "bar", @@ -158,6 +164,7 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{}, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -189,6 +196,7 @@ func TestTemplate_ManagerClusterRoleBinding(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "serviceAccount.create": "true", }, @@ -224,6 +232,7 @@ func TestTemplate_CreateManagerListenerRole(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{}, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -253,6 +262,7 @@ func TestTemplate_ManagerListenerRoleBinding(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "serviceAccount.create": "true", }, @@ -289,6 +299,7 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "image.tag": "dev", }, @@ -384,6 +395,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "labels.foo": "bar", "labels.github": "actions", @@ -508,6 
+520,7 @@ func TestTemplate_EnableLeaderElectionRole(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "replicaCount": "2", }, @@ -534,6 +547,7 @@ func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "replicaCount": "2", }, @@ -562,6 +576,7 @@ func TestTemplate_EnableLeaderElection(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "replicaCount": "2", "image.tag": "dev", @@ -605,6 +620,7 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "imagePullSecrets[0].name": "dockerhub", "imagePullSecrets[1].name": "ghcr", @@ -643,6 +659,7 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "image.tag": "dev", "flags.watchSingleNamespace": "demo", @@ -732,6 +749,7 @@ func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "env[0].Name": "ENV_VAR_NAME_1", "env[0].Value": "ENV_VAR_VALUE_1", @@ -778,6 +796,7 @@ func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "flags.watchSingleNamespace": "demo", }, @@ -799,6 +818,7 @@ func 
TestTemplate_WatchSingleNamespace_NotManagerClusterRoleBinding(t *testing.T namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "serviceAccount.create": "true", "flags.watchSingleNamespace": "demo", @@ -821,6 +841,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "flags.watchSingleNamespace": "demo", }, @@ -857,6 +878,7 @@ func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "flags.watchSingleNamespace": "demo", }, diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index 8676da6684..d1d1c7776b 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -10,6 +10,7 @@ import ( actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com" "github.com/gruntwork-io/terratest/modules/helm" "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/logger" "github.com/gruntwork-io/terratest/modules/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,6 +29,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -59,6 +61,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := 
&helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_app_id": "10", @@ -92,6 +95,7 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAuthInput(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_app_id": "", @@ -119,6 +123,7 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_app_id": "10", @@ -145,6 +150,7 @@ func TestTemplateNotRenderedGitHubSecretWithPredefinedSecret(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "pre-defined-secret", @@ -169,6 +175,7 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -204,6 +211,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -271,6 +279,7 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := 
&helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -303,6 +312,7 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -354,6 +364,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -403,6 +414,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -450,6 +462,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MaxRunnersValidationError(t *testi namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -477,6 +490,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinRunnersValidationError(t *testi namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -505,6 +519,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationError(t *te namespaceName := 
"test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -533,6 +548,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationSameValue(t namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -564,6 +580,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMin(t namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -594,6 +611,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -627,6 +645,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunners_FromValuesFile(t *te namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, ValuesFiles: []string{testValuesPath}, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -654,6 +673,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "controllerServiceAccount.name": "arc", "controllerServiceAccount.namespace": "arc-system", @@ -688,6 +708,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t 
*testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "controllerServiceAccount.name": "arc", "controllerServiceAccount.namespace": "arc-system", @@ -724,6 +745,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_K8S_ExtraVolumes(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "controllerServiceAccount.name": "arc", "controllerServiceAccount.namespace": "arc-system", @@ -755,6 +777,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -846,6 +869,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -903,6 +927,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T) namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "pre-defined-secrets", @@ -937,6 +962,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ErrorOnEmptyPredefinedSecret(t *te namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "", @@ -963,6 +989,7 @@ func TestTemplateRenderedWithProxy(t *testing.T) { 
namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "pre-defined-secrets", @@ -1026,6 +1053,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) { t.Run("providing githubServerTLS.runnerMountPath", func(t *testing.T) { t.Run("mode: default", func(t *testing.T) { options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "pre-defined-secrets", @@ -1084,6 +1112,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) { t.Run("mode: dind", func(t *testing.T) { options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "pre-defined-secrets", @@ -1143,6 +1172,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) { t.Run("mode: kubernetes", func(t *testing.T) { options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "pre-defined-secrets", @@ -1204,6 +1234,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) { t.Run("without providing githubServerTLS.runnerMountPath", func(t *testing.T) { t.Run("mode: default", func(t *testing.T) { options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "pre-defined-secrets", @@ -1258,6 +1289,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) { t.Run("mode: dind", func(t *testing.T) { options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "pre-defined-secrets", @@ -1313,6 +1345,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) { t.Run("mode: kubernetes", func(t *testing.T) { options := &helm.Options{ + Logger: 
logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "pre-defined-secrets", @@ -1402,6 +1435,7 @@ func TestTemplateNamingConstraints(t *testing.T) { for name, tc := range tt { t.Run(name, func(t *testing.T) { options := &helm.Options{ + Logger: logger.Discard, SetValues: setValues, KubectlOptions: k8s.NewKubectlOptions("", "", tc.namespaceName), } @@ -1423,6 +1457,7 @@ func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions/", "githubConfigSecret.github_token": "gh_token12345", @@ -1453,6 +1488,7 @@ func TestTemplate_CreateManagerRole(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -1487,6 +1523,7 @@ func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -1520,6 +1557,7 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -1556,6 +1594,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraContainers(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ 
"controllerServiceAccount.name": "arc", "controllerServiceAccount.namespace": "arc-system", @@ -1603,6 +1642,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraPodSpec(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "controllerServiceAccount.name": "arc", "controllerServiceAccount.namespace": "arc-system", @@ -1636,6 +1676,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) { namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "controllerServiceAccount.name": "arc", "controllerServiceAccount.namespace": "arc-system", @@ -1681,6 +1722,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T) namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "controllerServiceAccount.name": "arc", "controllerServiceAccount.namespace": "arc-system", @@ -1722,6 +1764,7 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing. annotationExpectedTests := map[string]*helm.Options{ "GitHub token": { + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", @@ -1731,6 +1774,7 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing. KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), }, "GitHub app": { + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_app_id": "10", @@ -1755,6 +1799,7 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing. 
t.Run("Annotation should not be set", func(t *testing.T) { options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret": "pre-defined-secret", @@ -1782,6 +1827,7 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t namespaceName := "test-" + strings.ToLower(random.UniqueId()) options := &helm.Options{ + Logger: logger.Discard, SetValues: map[string]string{ "githubConfigUrl": "https://github.com/actions", "githubConfigSecret.github_token": "gh_token12345", diff --git a/go.mod b/go.mod index c5a759eb54..350b8809f9 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 - github.com/gruntwork-io/terratest v0.41.11 + github.com/gruntwork-io/terratest v0.41.24 github.com/hashicorp/go-retryablehttp v0.7.2 github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 @@ -27,7 +27,7 @@ require ( go.uber.org/multierr v1.7.0 go.uber.org/zap v1.24.0 golang.org/x/net v0.8.0 - golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 + golang.org/x/oauth2 v0.1.0 golang.org/x/sync v0.1.0 gomodules.xyz/jsonpatch/v2 v2.2.0 gopkg.in/yaml.v2 v2.4.0 @@ -39,7 +39,7 @@ require ( ) require ( - github.com/aws/aws-sdk-go v1.40.56 // indirect + github.com/aws/aws-sdk-go v1.44.122 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect diff --git a/go.sum b/go.sum index b09cc55aa7..5aa01e1523 100644 --- a/go.sum +++ b/go.sum @@ -43,6 +43,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/aws/aws-sdk-go v1.40.56 h1:FM2yjR0UUYFzDTMx+mH9Vyw1k1EUUxsAFzk+BjkzANA= 
github.com/aws/aws-sdk-go v1.40.56/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= +github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -210,6 +212,8 @@ github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRa github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78= github.com/gruntwork-io/terratest v0.41.11 h1:EAHiK6PFWJCVkgW2yUompjSsZQzA0CfBcuqIaXtZdpk= github.com/gruntwork-io/terratest v0.41.11/go.mod h1:qH1xkPTTGx30XkMHw8jAVIbzqheSjIa5IyiTwSV2vKI= +github.com/gruntwork-io/terratest v0.41.24 h1:j6T6qe4deVvynTG2UmnjGwZy83he6xKgTaYWiSdFv/w= +github.com/gruntwork-io/terratest v0.41.24/go.mod h1:O6gajNBjO1wvc7Wl9WtbO+ORcdnhAV2GQiBE71ycwIk= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -468,6 +472,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= +golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From db28fc0cb2bc3a0074823e84f4a48ad82d8db71e Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Thu, 18 May 2023 15:37:41 +0200 Subject: [PATCH 216/561] Scale Set Metrics ADR (#2568) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- docs/adrs/2023-05-08-exposing-metrics.md | 213 +++++++++++++++++++++++ 1 file changed, 213 insertions(+) create mode 100644 docs/adrs/2023-05-08-exposing-metrics.md diff --git a/docs/adrs/2023-05-08-exposing-metrics.md b/docs/adrs/2023-05-08-exposing-metrics.md new file mode 100644 index 0000000000..6dc2fd7eee --- /dev/null +++ b/docs/adrs/2023-05-08-exposing-metrics.md @@ -0,0 +1,213 @@ +# Exposing metrics + +Date: 2023-05-08 + +**Status**: Proposed + +## Context + +Prometheus metrics are a common way to monitor the cluster. Providing metrics +can be a helpful way to monitor scale sets and the health of the ephemeral runners. + +## Proposal + +Two main components are driving the behavior of the scale set: + +1. ARC controllers responsible for managing Kubernetes resources. +2. The `AutoscalingListener`, driver of the autoscaling solution responsible for + describing the desired state. + +We can approach publishing those metrics in 3 different ways + +### Option 1: Expose a metrics endpoint for the controller-manager and every instance of the listener + +To expose metrics, we would need to create 3 additional resources: + +1. `ServiceMonitor` - a resource used by Prometheus to match namespaces and + services from where it needs to gather metrics +2. `Service` for the `gha-runner-scale-set-controller` - service that will + target ARC controller `Deployment` +3. 
`Service` for each `gha-runner-scale-set` listener - service that will target + a single listener pod for each `AutoscalingRunnerSet` + +#### Pros + +- Easy to control which scale set exposes metrics and which does not. +- Easy to implement using helm charts in case they are enabled per chart + installation. + +#### Cons + +- With a cluster running many scale sets, we are going to create a lot of + resources. +- In case metrics are enabled on the controller manager level, and they should + be applied across all `AutoscalingRunnerSets`, it is difficult to inherit this + configuration by applying helm charts. + +### Option 2: Create a single metrics aggregator service + +To create an aggregator service, we can create a simple web application +responsible for publishing and gathering metrics. All listeners would be +responsible to communicate the metrics on each message, and controllers are +responsible to communicate the metrics on each reconciliation. + +The application can be executed as a single pod, or as a side container next to +the manager. + +#### Running the aggregator as a container in the controller-manager pod + +**Pros:** +- It exists side by side and is following the life cycle of the controller + manager +- We don't need to introduce another controller managing the state of the pod + +**Cons** + +- Crashes of the aggregator can influence the controller manager execution +- The controller manager pod needs more resources to run + +#### Running the aggregator in a separate pod + +**Pros** + +- Does not influence the controller manager pod +- The life cycle of the metric can be controlled by the controller manager (by + implementing another controller) + +**Cons** + +- We need to implement the controller that can spin up the aggregator in case of + the crash. +- If we choose not to implement the controller, the resource like `Deployment` + can be used to manage the aggregator, but we lose control over its life cycle. 
+ +#### Metrics webserver requirements + +1. Create a web server with a single `/metrics` endpoint. The endpoint will have + `POST` and `GET` methods registered. The `GET` is used by Prometheus to + fetch the metrics, while the `POST` is going to be used by controllers and + listeners to publish their metrics. +2. `ServiceMonitor` - to target the metrics aggregator service +3. `Service` sitting in front of the web server. + +**Pros** + +- This implementation requires a few additional resources to be created + in a cluster. +- Web server is easy to implement and easy to document - all metrics are aggregated in a + single package, and the web server only needs to apply them to its state on + `POST`. The `GET` handler is simple. +- We can avoid Pushgateway from Prometheus. + +**Cons** + +- Another image that we need to publish on release. +- Change in metric configuration (on manager update) would require re-creation + of all listeners. This is not a big problem but is something to point out. +- Managing requests/limits can be tricky. + +### Option 3: Use a Prometheus Pushgateway + +#### Pros + +- Using a supported way of pushing the metrics. +- Easy to implement using their library. + +#### Cons + +- In the Prometheus docs, they specify that: "Usually, the only valid use case + for Pushgateway is for capturing the outcome of a service-level batch job". + The listener does not really fit this criteria. +- Pushgateway is a single point of failure and potential bottleneck. +- You lose Prometheus's automatic instance health monitoring via the up metric (generated on every scrape). +- The Pushgateway never forgets series pushed to it and will expose them to Prometheus forever unless those series are manually deleted via the Pushgateway's API. + +## Decision + +Since there are many ways in which you can collect metrics, we have decided not +to apply `prometheus-operator` resources nor `Service`. 
+ +The responsibility of the controller and the autoscaling listener is +only to expose metrics. It is up to the user to decide how to collect them. + +When installing the ARC, the configuration for both the controller manager +and autoscaling listeners' metric servers is established. + +### Controller metrics + +By default, metrics server is listening on `0.0.0.0:8080`. +You can control the port of the metrics server using the `--metrics-addr` flag. + +Metrics can be collected from `/metrics` endpoint + +If the value of `--metrics-addr` is an empty string, metrics server won't be +started. + +### Autoscaling listeners + +By default, metrics server is listening on `0.0.0.0:8080`. +The endpoint used to expose metrics is `/metrics`. + +You can control both the address and the endpoint using `--listener-metrics-addr` and `--listener-metrics-endpoint` flags. + +If the value of `--listener-metrics-addr` is an empty string, metrics server won't be +started. + +### Metrics exposed by the controller + +To get a better understanding of health and workings of the cluster +resources, we need to expose the following metrics: + +- `pending_ephemeral_runners` - Number of ephemeral runners in a pending state. + This information can show the latency between creating an `EphemeralRunner` + resource, and having an ephemeral runner pod started and ready to receive a + job. +- `running_ephemeral_runners` - Number of ephemeral runners currently running. + This information is helpful to see how many ephemeral runner pods are running + at any given time. +- `failed_ephemeral_runners` - Number of ephemeral runners in a `Failed` state. + This information is helpful to catch the faulty image, or some underlying + problem. When the ephemeral runner controller is not able to start the + ephemeral runner pod after multiple retries, it will set the state of the + `EphemeralRunner` to failed. 
Since the controller can not recover from this + state, it can be useful to set Prometheus alerts to catch this issue quickly. + +### Metrics exposed by the `AutoscalingListener` + +Since the listener is responsible for communicating the state with the actions +service, it can expose actions service related data through metrics. In +particular: + +- `available_jobs` - Number of jobs with `runs-on` matching the runner scale set name. Jobs are not yet assigned but are acquired by the runner scale set. +- `acquired_jobs`- Number of jobs acquired by the scale set. +- `assigned_jobs` - Number of jobs assigned to this scale set. +- `running_jobs` - Number of jobs running (or about to be run). +- `registered_runners` - Number of registered runners. +- `busy_runners` - Number of registered runners running a job. +- `min_runners` - Number of runners desired by the scale set. +- `max_runners` - Number of runners desired by the scale set. +- `desired_runners` - Number of runners desired by the scale set. +- `idle_runners` - Number of registered runners not running a job. +- `available_jobs_total` - Total number of jobs available for the scale set (runs-on matches and scale set passes all the runner group permission checks). +- `acquired_jobs_total` - Total number of jobs acquired by the scale set. +- `assigned_jobs_total` - Total number of jobs assigned to the scale set. +- `started_jobs_total` - Total number of jobs started. +- `completed_jobs_total` - Total number of jobs completed. +- `job_queue_duration_seconds` - Time spent waiting for workflow jobs to get assigned to the scale set after queueing (in seconds). +- `job_startup_duration_seconds` - Time spent waiting for a workflow job to get started on the runner owned by the scale set (in seconds). +- `job_execution_duration_seconds` - Time spent executing workflow jobs by the scale set (in seconds). 
+ +### Metric names + +Listener metrics belong to the `github_runner_scale_set` subsystem, so the names +are going to have the `github_runner_scale_set_` prefix. + +Controller metrics belong to the `github_runner_scale_set_controller` subsystem, +so the names are going to have `github_runner_scale_set_controller` prefix. + +## Consequences + +Users can define alerts, monitor the behavior of both the actions-based metrics +(gathered from the listener) and the Kubernetes resource-based metrics +(gathered from the controller manager). + From 2a95f6bf8213986f2629c44af7ee0bb521c57527 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Mon, 22 May 2023 13:16:38 +0200 Subject: [PATCH 217/561] Fix workflows concurrency group names (#2611) --- .github/workflows/arc-validate-chart.yaml | 2 +- .github/workflows/arc-validate-runners.yaml | 2 +- .github/workflows/gha-e2e-tests.yaml | 2 +- .github/workflows/gha-validate-chart.yaml | 2 +- .github/workflows/global-run-codeql.yaml | 2 +- .github/workflows/go.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/arc-validate-chart.yaml b/.github/workflows/arc-validate-chart.yaml index c11ad22f35..5e5f8b227f 100644 --- a/.github/workflows/arc-validate-chart.yaml +++ b/.github/workflows/arc-validate-chart.yaml @@ -31,7 +31,7 @@ concurrency: # This will make sure we only apply the concurrency limits on pull requests # but not pushes to master branch by making the concurrency group name unique # for pushes - group: ${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/arc-validate-runners.yaml b/.github/workflows/arc-validate-runners.yaml index 0b43f39b37..562320f642 100644 --- a/.github/workflows/arc-validate-runners.yaml +++ b/.github/workflows/arc-validate-runners.yaml @@ -16,7 +16,7 @@ concurrency: # This will make sure we only apply the 
concurrency limits on pull requests # but not pushes to master branch by making the concurrency group name unique # for pushes - group: ${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/gha-e2e-tests.yaml b/.github/workflows/gha-e2e-tests.yaml index 48ca58505c..07621df0fa 100644 --- a/.github/workflows/gha-e2e-tests.yaml +++ b/.github/workflows/gha-e2e-tests.yaml @@ -22,7 +22,7 @@ concurrency: # This will make sure we only apply the concurrency limits on pull requests # but not pushes to master branch by making the concurrency group name unique # for pushes - group: ${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/gha-validate-chart.yaml b/.github/workflows/gha-validate-chart.yaml index eb4f1f5e82..e2cf3c3dcc 100644 --- a/.github/workflows/gha-validate-chart.yaml +++ b/.github/workflows/gha-validate-chart.yaml @@ -27,7 +27,7 @@ concurrency: # This will make sure we only apply the concurrency limits on pull requests # but not pushes to master branch by making the concurrency group name unique # for pushes - group: ${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: diff --git a/.github/workflows/global-run-codeql.yaml b/.github/workflows/global-run-codeql.yaml index b13463d409..aae1be2f65 100644 --- a/.github/workflows/global-run-codeql.yaml +++ b/.github/workflows/global-run-codeql.yaml @@ -14,7 +14,7 @@ concurrency: # This will make sure we only apply the concurrency limits on pull requests # but not pushes to master branch by making the concurrency group name unique # for pushes - group: ${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: 
diff --git a/.github/workflows/go.yaml b/.github/workflows/go.yaml index 21a783bac6..7611d2f825 100644 --- a/.github/workflows/go.yaml +++ b/.github/workflows/go.yaml @@ -22,7 +22,7 @@ concurrency: # This will make sure we only apply the concurrency limits on pull requests # but not pushes to master branch by making the concurrency group name unique # for pushes - group: ${{ github.head_ref || github.run_id }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: From efcd053ab1623c34813b3f6d23ba989f7846d13e Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Tue, 23 May 2023 13:42:30 +0200 Subject: [PATCH 218/561] Add DrainJobsMode (aka UpdateStrategy feature) (#2569) --- .../execute-assert-arc-e2e/action.yaml | 46 ++++- .github/workflows/gha-e2e-tests.yaml | 170 ++++++++++++++++++ .gitignore | 1 - .../templates/deployment.yaml | 3 + .../tests/template_test.go | 18 +- .../values.yaml | 20 ++- .../autoscalingrunnerset_controller.go | 97 +++++++--- .../autoscalingrunnerset_controller_test.go | 121 ++++++++++++- main.go | 12 ++ 9 files changed, 449 insertions(+), 39 deletions(-) diff --git a/.github/actions/execute-assert-arc-e2e/action.yaml b/.github/actions/execute-assert-arc-e2e/action.yaml index 37d9c5853f..8089d15f20 100644 --- a/.github/actions/execute-assert-arc-e2e/action.yaml +++ b/.github/actions/execute-assert-arc-e2e/action.yaml @@ -23,6 +23,14 @@ inputs: arc-controller-namespace: description: 'The namespace of the configured gha-runner-scale-set-controller' required: true + wait-to-finish: + description: 'Wait for the workflow run to finish' + required: true + default: "true" + wait-to-running: + description: 'Wait for the workflow run to start running' + required: true + default: "false" runs: using: "composite" @@ -118,7 +126,36 @@ runs: | ${{steps.query_workflow.outputs.workflow_run_url}} | EOF + - name: Wait for workflow to start running + if: 
inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false' + uses: actions/github-script@v6 + with: + script: | + function sleep(ms) { + return new Promise(resolve => setTimeout(resolve, ms)) + } + const owner = '${{inputs.repo-owner}}' + const repo = '${{inputs.repo-name}}' + const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}} + const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}} + let count = 0 + while (count++<10) { + await sleep(30 * 1000); + let getRunResponse = await github.rest.actions.getWorkflowRun({ + owner: owner, + repo: repo, + run_id: workflow_run_id + }) + console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`); + if (getRunResponse.data.status == 'in_progress') { + console.log(`Workflow run is in progress.`) + return + } + } + core.setFailed(`The triggered workflow run didn't start properly using ${{inputs.arc-name}}`) + - name: Wait for workflow to finish successfully + if: inputs.wait-to-finish == 'true' uses: actions/github-script@v6 with: script: | @@ -151,10 +188,15 @@ runs: } core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`) - - name: Gather logs and cleanup + - name: cleanup + if: inputs.wait-to-finish == 'true' shell: bash - if: always() run: | helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }} + + - name: Gather logs and cleanup + shell: bash + if: always() + run: | kubectl logs deployment/arc-gha-runner-scale-set-controller -n ${{inputs.arc-controller-namespace}} \ No newline at end of file diff --git a/.github/workflows/gha-e2e-tests.yaml b/.github/workflows/gha-e2e-tests.yaml index 07621df0fa..6badcbf5a3 100644 --- a/.github/workflows/gha-e2e-tests.yaml +++ b/.github/workflows/gha-e2e-tests.yaml @@ -710,3 +710,173 @@ 
jobs: arc-name: ${{steps.install_arc.outputs.ARC_NAME}} arc-namespace: "arc-runners" arc-controller-namespace: "arc-systems" + + update-strategy-tests: + runs-on: ubuntu-latest + timeout-minutes: 20 + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id + env: + WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml" + steps: + - uses: actions/checkout@v3 + with: + ref: ${{github.head_ref}} + + - uses: ./.github/actions/setup-arc-e2e + id: setup + with: + app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}} + app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}} + image-name: ${{env.IMAGE_NAME}} + image-tag: ${{env.IMAGE_VERSION}} + target-org: ${{env.TARGET_ORG}} + + - name: Install gha-runner-scale-set-controller + id: install_arc_controller + run: | + helm install arc \ + --namespace "arc-systems" \ + --create-namespace \ + --set image.repository=${{ env.IMAGE_NAME }} \ + --set image.tag=${{ env.IMAGE_VERSION }} \ + --set flags.updateStrategy="eventual" \ + ./charts/gha-runner-scale-set-controller \ + --debug + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 60 ]; then + echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" + exit 1 + fi + sleep 1 + count=$((count+1)) + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller + kubectl get pod -n arc-systems + kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems + + - name: Install gha-runner-scale-set + id: install_arc + run: | + ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1)) + helm install "$ARC_NAME" \ + --namespace "arc-runners" \ + --create-namespace \ + --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG 
}}/${{env.TARGET_REPO}}" \ + --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ + ./charts/gha-runner-scale-set \ + --debug + echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT + count=0 + while true; do + POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name) + if [ -n "$POD_NAME" ]; then + echo "Pod found: $POD_NAME" + break + fi + if [ "$count" -ge 60 ]; then + echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" + exit 1 + fi + sleep 1 + count=$((count+1)) + done + kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME + kubectl get pod -n arc-systems + + - name: Trigger long running jobs and wait for runners to pick them up + uses: ./.github/actions/execute-assert-arc-e2e + timeout-minutes: 10 + with: + auth-token: ${{ steps.setup.outputs.token }} + repo-owner: ${{ env.TARGET_ORG }} + repo-name: ${{env.TARGET_REPO}} + workflow-file: ${{env.WORKFLOW_FILE}} + arc-name: ${{steps.install_arc.outputs.ARC_NAME}} + arc-namespace: "arc-runners" + arc-controller-namespace: "arc-systems" + wait-to-running: "true" + wait-to-finish: "false" + + - name: Upgrade the gha-runner-scale-set + shell: bash + run: | + helm upgrade --install "${{ steps.install_arc.outputs.ARC_NAME }}" \ + --namespace "arc-runners" \ + --create-namespace \ + --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{ env.TARGET_REPO }}" \ + --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \ + --set template.spec.containers[0].name="runner" \ + --set template.spec.containers[0].image="ghcr.io/actions/actions-runner:latest" \ + --set template.spec.containers[0].command={"/home/runner/run.sh"} \ + --set template.spec.containers[0].env[0].name="TEST" \ + --set template.spec.containers[0].env[0].value="E2E TESTS" \ + ./charts/gha-runner-scale-set \ + --debug + + - name: Assert that the listener is deleted while 
jobs are running + shell: bash + run: | + count=0 + while true; do + LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name=${{ steps.install_arc.outputs.ARC_NAME }} -n arc-systems --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')" + RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n arc-runners --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')" + RESOURCES="$(kubectl get pods -A)" + + if [ "$LISTENER_COUNT" -eq 0 ]; then + echo "Listener has been deleted" + echo "$RESOURCES" + exit 0 + fi + if [ "$count" -ge 60 ]; then + echo "Timeout waiting for listener to be deleted" + echo "$RESOURCES" + exit 1 + fi + + echo "Waiting for listener to be deleted" + echo "Listener count: $LISTENER_COUNT target: 0 | Runners count: $RUNNERS_COUNT target: 3" + + sleep 1 + count=$((count+1)) + done + + - name: Assert that the listener goes back up after the jobs are done + shell: bash + run: | + count=0 + while true; do + LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name=${{ steps.install_arc.outputs.ARC_NAME }} -n arc-systems --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')" + RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n arc-runners --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')" + RESOURCES="$(kubectl get pods -A)" + + if [ "$LISTENER_COUNT" -eq 1 ]; then + echo "Listener is up!" 
+ echo "$RESOURCES" + exit 0 + fi + if [ "$count" -ge 120 ]; then + echo "Timeout waiting for listener to be recreated" + echo "$RESOURCES" + exit 1 + fi + + echo "Waiting for listener to be recreated" + echo "Listener count: $LISTENER_COUNT target: 1 | Runners count: $RUNNERS_COUNT target: 0" + + sleep 1 + count=$((count+1)) + done + + - name: Gather logs and cleanup + shell: bash + if: always() + run: | + helm uninstall "${{ steps.install_arc.outputs.ARC_NAME }}" --namespace "arc-runners" --debug + kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}" + kubectl logs deployment/arc-gha-runner-scale-set-controller -n "arc-systems" \ No newline at end of file diff --git a/.gitignore b/.gitignore index ce539d20b8..e0fcafbf16 100644 --- a/.gitignore +++ b/.gitignore @@ -35,5 +35,4 @@ bin .DS_STORE /test-assets - /.tools diff --git a/charts/gha-runner-scale-set-controller/templates/deployment.yaml b/charts/gha-runner-scale-set-controller/templates/deployment.yaml index 1997d2b0c2..dc0b88a7eb 100644 --- a/charts/gha-runner-scale-set-controller/templates/deployment.yaml +++ b/charts/gha-runner-scale-set-controller/templates/deployment.yaml @@ -59,6 +59,9 @@ spec: {{- with .Values.flags.watchSingleNamespace }} - "--watch-single-namespace={{ . }}" {{- end }} + {{- with .Values.flags.updateStrategy }} + - "--update-strategy={{ . 
}}" + {{- end }} command: - "/manager" env: diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index 663e64b22a..e972bd07ab 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -356,9 +356,10 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1]) + assert.Equal(t, "--update-strategy=immediate", deployment.Spec.Template.Spec.Containers[0].Args[2]) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) @@ -417,7 +418,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { "tolerations[0].key": "foo", "affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo", "affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar", - "priorityClassName": "test-priority-class", + "priorityClassName": "test-priority-class", + "flags.updateStrategy": "eventual", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -482,10 +484,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", 
deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) + assert.Equal(t, "--update-strategy=eventual", deployment.Spec.Template.Spec.Containers[0].Args[3]) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) @@ -602,11 +605,12 @@ func TestTemplate_EnableLeaderElection(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 5) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--enable-leader-election", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--leader-election-id=test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.Containers[0].Args[2]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[3]) + assert.Equal(t, "--update-strategy=immediate", deployment.Spec.Template.Spec.Containers[0].Args[4]) } func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) { @@ -635,10 +639,11 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) { assert.Equal(t, namespaceName, deployment.Namespace) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3) + assert.Len(t, 
deployment.Spec.Template.Spec.Containers[0].Args, 4) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) + assert.Equal(t, "--update-strategy=immediate", deployment.Spec.Template.Spec.Containers[0].Args[3]) } func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) { @@ -716,10 +721,11 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3) + assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4) assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1]) assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2]) + assert.Equal(t, "--update-strategy=immediate", deployment.Spec.Template.Spec.Containers[0].Args[3]) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) diff --git a/charts/gha-runner-scale-set-controller/values.yaml b/charts/gha-runner-scale-set-controller/values.yaml index 03359b5f6a..b0405609cc 100644 --- a/charts/gha-runner-scale-set-controller/values.yaml +++ b/charts/gha-runner-scale-set-controller/values.yaml @@ -76,10 +76,26 @@ affinity: {} priorityClassName: "" flags: - # Log level can be set here with one of the following values: "debug", "info", "warn", "error". - # Defaults to "debug". 
+ ## Log level can be set here with one of the following values: "debug", "info", "warn", "error". + ## Defaults to "debug". logLevel: "debug" ## Restricts the controller to only watch resources in the desired namespace. ## Defaults to watch all namespaces when unset. # watchSingleNamespace: "" + + ## Defines how the controller should handle upgrades while having running jobs. + ## + ## The srategies available are: + ## - "immediate": (default) The controller will immediately apply the change causing the + ## recreation of the listener and ephemeral runner set. This can lead to an + ## overprovisioning of runners, if there are pending / running jobs. This should not + ## be a problem at a small scale, but it could lead to a significant increase of + ## resources if you have a lot of jobs running concurrently. + ## + ## - "eventual": The controller will remove the listener and ephemeral runner set + ## immediately, but will not recreate them (to apply changes) until all + ## pending / running jobs have completed. + ## This can lead to a longer time to apply the change but it will ensure + ## that you don't have any overprovisioning of runners. + updateStrategy: "immediate" \ No newline at end of file diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 79e28df90a..201e796baf 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -49,6 +49,24 @@ const ( runnerScaleSetNameAnnotationKey = "runner-scale-set-name" ) +type UpdateStrategy string + +// Defines how the controller should handle upgrades while having running jobs. +const ( + // "immediate": (default) The controller will immediately apply the change causing the + // recreation of the listener and ephemeral runner set. This can lead to an + // overprovisioning of runners, if there are pending / running jobs. 
This should not + // be a problem at a small scale, but it could lead to a significant increase of + // resources if you have a lot of jobs running concurrently. + UpdateStrategyImmediate = UpdateStrategy("immediate") + // "eventual": The controller will remove the listener and ephemeral runner set + // immediately, but will not recreate them (to apply changes) until all + // pending / running jobs have completed. + // This can lead to a longer time to apply the change but it will ensure + // that you don't have any overprovisioning of runners. + UpdateStrategyEventual = UpdateStrategy("eventual") +) + // AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object type AutoscalingRunnerSetReconciler struct { client.Client @@ -57,6 +75,7 @@ type AutoscalingRunnerSetReconciler struct { ControllerNamespace string DefaultRunnerScaleSetListenerImage string DefaultRunnerScaleSetListenerImagePullSecrets []string + UpdateStrategy UpdateStrategy ActionsClient actions.MultiClient resourceBuilder resourceBuilder @@ -218,35 +237,21 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash]) } - if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] { - log.Info("Latest runner set spec hash does not match the current autoscaling runner set. 
Creating a new runner set") - return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log) - } - - oldRunnerSets := existingRunnerSets.old() - if len(oldRunnerSets) > 0 { - log.Info("Cleanup old ephemeral runner sets", "count", len(oldRunnerSets)) - err := r.deleteEphemeralRunnerSets(ctx, oldRunnerSets, log) - if err != nil { - log.Error(err, "Failed to clean up old runner sets") - return ctrl.Result{}, err - } - } - // Make sure the AutoscalingListener is up and running in the controller namespace listener := new(v1alpha1.AutoscalingListener) + listenerFound := true if err := r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, listener); err != nil { - if kerrors.IsNotFound(err) { - // We don't have a listener - log.Info("Creating a new AutoscalingListener for the runner set", "ephemeralRunnerSetName", latestRunnerSet.Name) - return r.createAutoScalingListenerForRunnerSet(ctx, autoscalingRunnerSet, latestRunnerSet, log) + if !kerrors.IsNotFound(err) { + log.Error(err, "Failed to get AutoscalingListener resource") + return ctrl.Result{}, err } - log.Error(err, "Failed to get AutoscalingListener resource") - return ctrl.Result{}, err + + listenerFound = false + log.Info("AutoscalingListener does not exist.") } // Our listener pod is out of date, so we need to delete it to get a new recreate. - if listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() { + if listenerFound && (listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()) { log.Info("RunnerScaleSetListener is out of date. 
Deleting it so that it is recreated", "name", listener.Name) if err := r.Delete(ctx, listener); err != nil { if kerrors.IsNotFound(err) { @@ -260,6 +265,44 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, nil } + if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] { + if r.drainingJobs(&latestRunnerSet.Status) { + log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners) + log.Info("Scaling down the number of desired replicas to 0") + // We are in the process of draining the jobs. The listener has been deleted and the ephemeral runner set replicas + // need to scale down to 0 + err := patch(ctx, r.Client, latestRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { + obj.Spec.Replicas = 0 + }) + if err != nil { + log.Error(err, "Failed to patch runner set to set desired count to 0") + } + return ctrl.Result{}, err + } + log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set") + return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log) + } + + oldRunnerSets := existingRunnerSets.old() + if len(oldRunnerSets) > 0 { + log.Info("Cleanup old ephemeral runner sets", "count", len(oldRunnerSets)) + err := r.deleteEphemeralRunnerSets(ctx, oldRunnerSets, log) + if err != nil { + log.Error(err, "Failed to clean up old runner sets") + return ctrl.Result{}, err + } + } + + // Make sure the AutoscalingListener is up and running in the controller namespace + if !listenerFound { + if r.drainingJobs(&latestRunnerSet.Status) { + log.Info("Creating a new AutoscalingListener is waiting for the running and pending runners to finish. 
Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners) + return ctrl.Result{}, nil + } + log.Info("Creating a new AutoscalingListener for the runner set", "ephemeralRunnerSetName", latestRunnerSet.Name) + return r.createAutoScalingListenerForRunnerSet(ctx, autoscalingRunnerSet, latestRunnerSet, log) + } + // Update the status of autoscaling runner set. if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners { if err := patchSubResource(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) { @@ -276,6 +319,16 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, nil } +// Prevents overprovisioning of runners. +// We reach this code path when runner scale set has been patched with a new runner spec but there are still running ephemeral runners. +// The safest approach is to wait for the running ephemeral runners to finish before creating a new runner set. 
+func (r *AutoscalingRunnerSetReconciler) drainingJobs(latestRunnerSetStatus *v1alpha1.EphemeralRunnerSetStatus) bool { + if r.UpdateStrategy == UpdateStrategyEventual && ((latestRunnerSetStatus.RunningEphemeralRunners + latestRunnerSetStatus.PendingEphemeralRunners) > 0) { + return true + } + return false +} + func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) { logger.Info("Cleaning up the listener") var listener v1alpha1.AutoscalingListener diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go index 390511e634..41512e06c1 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller_test.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller_test.go @@ -42,6 +42,7 @@ const ( var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() { var ctx context.Context var mgr ctrl.Manager + var controller *AutoscalingRunnerSetReconciler var autoscalingNS *corev1.Namespace var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet var configSecret *corev1.Secret @@ -63,7 +64,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() { autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) - controller := &AutoscalingRunnerSetReconciler{ + controller = &AutoscalingRunnerSetReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Log: logf.Log, @@ -424,6 +425,110 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() { }) }) + Context("When updating an AutoscalingRunnerSet with running or pending jobs", func() { + It("It should wait for running and pending jobs to finish before applying the update. 
Update Strategy is set to eventual.", func() { + // Switch update strategy to eventual (drain jobs ) + controller.UpdateStrategy = UpdateStrategyEventual + // Wait till the listener is created + listener := new(v1alpha1.AutoscalingListener) + Eventually( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener) + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(Succeed(), "Listener should be created") + + // Wait till the ephemeral runner set is created + Eventually( + func() (int, error) { + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) + if err != nil { + return 0, err + } + + return len(runnerSetList.Items), nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created") + + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) + Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet") + + // Emulate running and pending jobs + runnerSet := runnerSetList.Items[0] + activeRunnerSet := runnerSet.DeepCopy() + activeRunnerSet.Status.CurrentReplicas = 6 + activeRunnerSet.Status.FailedEphemeralRunners = 1 + activeRunnerSet.Status.RunningEphemeralRunners = 2 + activeRunnerSet.Status.PendingEphemeralRunners = 3 + + desiredStatus := v1alpha1.AutoscalingRunnerSetStatus{ + CurrentRunners: activeRunnerSet.Status.CurrentReplicas, + State: "", + PendingEphemeralRunners: activeRunnerSet.Status.PendingEphemeralRunners, + RunningEphemeralRunners: activeRunnerSet.Status.RunningEphemeralRunners, + FailedEphemeralRunners: activeRunnerSet.Status.FailedEphemeralRunners, + } + + err = k8sClient.Status().Patch(ctx, activeRunnerSet, 
client.MergeFrom(&runnerSet)) + Expect(err).NotTo(HaveOccurred(), "Failed to patch runner set status") + + Eventually( + func() (v1alpha1.AutoscalingRunnerSetStatus, error) { + updated := new(v1alpha1.AutoscalingRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated) + if err != nil { + return v1alpha1.AutoscalingRunnerSetStatus{}, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err) + } + return updated.Status, nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeEquivalentTo(desiredStatus), "AutoScalingRunnerSet status should be updated") + + // Patch the AutoScalingRunnerSet image which should trigger + // the recreation of the Listener and EphemeralRunnerSet + patched := autoscalingRunnerSet.DeepCopy() + patched.Spec.Template.Spec = corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "runner", + Image: "ghcr.io/actions/abcd:1.1.1", + }, + }, + } + // patched.Spec.Template.Spec.PriorityClassName = "test-priority-class" + err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) + Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") + autoscalingRunnerSet = patched.DeepCopy() + + // The EphemeralRunnerSet should not be recreated + Consistently( + func() (string, error) { + runnerSetList := new(v1alpha1.EphemeralRunnerSetList) + err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace)) + Expect(err).NotTo(HaveOccurred(), "failed to fetch AutoScalingRunnerSet") + return runnerSetList.Items[0].Name, nil + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(Equal(activeRunnerSet.Name), "The EphemeralRunnerSet should not be recreated") + + // The listener should not be recreated + Consistently( + func() error { + return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: 
autoscalingRunnerSet.Namespace}, listener) + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).ShouldNot(Succeed(), "Listener should not be recreated") + }) + }) + It("Should update Status on EphemeralRunnerSet status Update", func() { ars := new(v1alpha1.AutoscalingRunnerSet) Eventually( @@ -1617,10 +1722,14 @@ var _ = Describe("Test resource version and build version mismatch", func() { startManagers(GinkgoT(), mgr) - Eventually(func() bool { - ars := new(v1alpha1.AutoscalingRunnerSet) - err := k8sClient.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Name}, ars) - return errors.IsNotFound(err) - }).Should(BeTrue()) + Eventually( + func() bool { + ars := new(v1alpha1.AutoscalingRunnerSet) + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Name}, ars) + return errors.IsNotFound(err) + }, + autoscalingRunnerSetTestTimeout, + autoscalingRunnerSetTestInterval, + ).Should(BeTrue()) }) }) diff --git a/main.go b/main.go index 543db603ab..9cc1bbdd46 100644 --- a/main.go +++ b/main.go @@ -77,6 +77,7 @@ func main() { autoScalingRunnerSetOnly bool enableLeaderElection bool disableAdmissionWebhook bool + updateStrategy string leaderElectionId string port int syncPeriod time.Duration @@ -131,6 +132,7 @@ func main() { flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`) flag.StringVar(&logFormat, "log-format", "text", `The log format. Valid options are "text" and "json". Defaults to "text"`) flag.BoolVar(&autoScalingRunnerSetOnly, "auto-scaling-runner-set-only", false, "Make controller only reconcile AutoRunnerScaleSet object.") + flag.StringVar(&updateStrategy, "update-strategy", "immediate", `Resources reconciliation strategy on upgrade with running/pending jobs. Valid values are: "immediate", "eventual". 
Defaults to "immediate".`) flag.Var(&autoScalerImagePullSecrets, "auto-scaler-image-pull-secrets", "The default image-pull secret name for auto-scaler listener container.") flag.Parse() @@ -169,6 +171,14 @@ func main() { if len(watchSingleNamespace) > 0 { newCache = cache.MultiNamespacedCacheBuilder([]string{managerNamespace, watchSingleNamespace}) } + + switch updateStrategy { + case "eventual", "immediate": + log.Info(`Update strategy set to:`, "updateStrategy", updateStrategy) + default: + log.Info(`Update strategy not recognized. Defaulting to "immediately"`, "updateStrategy", updateStrategy) + updateStrategy = "immediate" + } } listenerPullPolicy := os.Getenv("CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY") @@ -216,6 +226,7 @@ func main() { ControllerNamespace: managerNamespace, DefaultRunnerScaleSetListenerImage: managerImage, ActionsClient: actionsMultiClient, + UpdateStrategy: actionsgithubcom.UpdateStrategy(updateStrategy), DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets, }).SetupWithManager(mgr); err != nil { log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet") @@ -241,6 +252,7 @@ func main() { log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet") os.Exit(1) } + if err = (&actionsgithubcom.AutoscalingListenerReconciler{ Client: mgr.GetClient(), Log: log.WithName("AutoscalingListener"), From 2090a869941499c861e597d1fb6b7078299965e5 Mon Sep 17 00:00:00 2001 From: Armin Becher Date: Sat, 27 May 2023 04:22:44 +0200 Subject: [PATCH 219/561] Fix typo in HorizontalRunnerAutoscaler (#2563) Co-authored-by: Yusuke Kuoka --- .../v1alpha1/horizontalrunnerautoscaler_types.go | 2 +- .../actions.summerwind.dev_horizontalrunnerautoscalers.yaml | 2 +- .../actions.summerwind.dev_horizontalrunnerautoscalers.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apis/actions.summerwind.net/v1alpha1/horizontalrunnerautoscaler_types.go 
b/apis/actions.summerwind.net/v1alpha1/horizontalrunnerautoscaler_types.go index 138d8da1db..84b85ae607 100644 --- a/apis/actions.summerwind.net/v1alpha1/horizontalrunnerautoscaler_types.go +++ b/apis/actions.summerwind.net/v1alpha1/horizontalrunnerautoscaler_types.go @@ -22,7 +22,7 @@ import ( // HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler type HorizontalRunnerAutoscalerSpec struct { - // ScaleTargetRef sis the reference to scaled resource like RunnerDeployment + // ScaleTargetRef is the reference to scaled resource like RunnerDeployment ScaleTargetRef ScaleTargetRef `json:"scaleTargetRef,omitempty"` // MinReplicas is the minimum number of replicas the deployment is allowed to scale diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_horizontalrunnerautoscalers.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_horizontalrunnerautoscalers.yaml index da1fd06baf..b82589de58 100644 --- a/charts/actions-runner-controller/crds/actions.summerwind.dev_horizontalrunnerautoscalers.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_horizontalrunnerautoscalers.yaml @@ -113,7 +113,7 @@ spec: description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... 
loop) type: integer scaleTargetRef: - description: ScaleTargetRef sis the reference to scaled resource like RunnerDeployment + description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment properties: kind: description: Kind is the type of resource being referenced diff --git a/config/crd/bases/actions.summerwind.dev_horizontalrunnerautoscalers.yaml b/config/crd/bases/actions.summerwind.dev_horizontalrunnerautoscalers.yaml index da1fd06baf..b82589de58 100644 --- a/config/crd/bases/actions.summerwind.dev_horizontalrunnerautoscalers.yaml +++ b/config/crd/bases/actions.summerwind.dev_horizontalrunnerautoscalers.yaml @@ -113,7 +113,7 @@ spec: description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop) type: integer scaleTargetRef: - description: ScaleTargetRef sis the reference to scaled resource like RunnerDeployment + description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment properties: kind: description: Kind is the type of resource being referenced From 326896cdf3df95ba31b2a9318a187166eb363942 Mon Sep 17 00:00:00 2001 From: Changliang Wu <44141526+wu-cl@users.noreply.github.com> Date: Sat, 27 May 2023 10:32:46 +0800 Subject: [PATCH 220/561] feat: support configure docker insecure registry with env (#2606) --- runner/entrypoint-dind.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/runner/entrypoint-dind.sh b/runner/entrypoint-dind.sh index 78356bb5c7..56bd6e854d 100755 --- a/runner/entrypoint-dind.sh +++ b/runner/entrypoint-dind.sh @@ -23,6 +23,10 @@ fi if [ -n "${DOCKER_REGISTRY_MIRROR}" ]; then jq ".\"registry-mirrors\"[0] = \"${DOCKER_REGISTRY_MIRROR}\"" /etc/docker/daemon.json > /tmp/.daemon.json && mv /tmp/.daemon.json /etc/docker/daemon.json fi + +if [ -n "${DOCKER_INSECURE_REGISTRY}" ]; then +jq ".\"insecure-registries\"[0] = \"${DOCKER_INSECURE_REGISTRY}\"" /etc/docker/daemon.json > 
/tmp/.daemon.json && mv /tmp/.daemon.json /etc/docker/daemon.json +fi SCRIPT dumb-init bash <<'SCRIPT' & From f93b694d512c25fac89a25ddae97ba20f07e5f9a Mon Sep 17 00:00:00 2001 From: Vincent Rivellino Date: Fri, 26 May 2023 22:33:20 -0400 Subject: [PATCH 221/561] fix: labels on github webhook service template (#2582) --- .../templates/githubwebhook.service.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/actions-runner-controller/templates/githubwebhook.service.yaml b/charts/actions-runner-controller/templates/githubwebhook.service.yaml index 99a7ea2c25..6835c8cf32 100644 --- a/charts/actions-runner-controller/templates/githubwebhook.service.yaml +++ b/charts/actions-runner-controller/templates/githubwebhook.service.yaml @@ -5,7 +5,7 @@ metadata: name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }} namespace: {{ .Release.Namespace }} labels: - {{- include "actions-runner-controller.labels" . | nindent 4 }} + {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . 
| nindent 4 }} {{- if .Values.githubWebhookServer.service.annotations }} annotations: {{ toYaml .Values.githubWebhookServer.service.annotations | nindent 4 }} From 00a9104f2fd49136e6e8999a9cd5a051e71803bf Mon Sep 17 00:00:00 2001 From: robert lestak Date: Fri, 26 May 2023 19:33:46 -0700 Subject: [PATCH 222/561] enable passing docker-gid in helm chart (#2574) --- charts/actions-runner-controller/templates/deployment.yaml | 3 +++ charts/actions-runner-controller/values.yaml | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/charts/actions-runner-controller/templates/deployment.yaml b/charts/actions-runner-controller/templates/deployment.yaml index 020e966299..845da8356b 100644 --- a/charts/actions-runner-controller/templates/deployment.yaml +++ b/charts/actions-runner-controller/templates/deployment.yaml @@ -70,6 +70,9 @@ spec: {{- if .Values.logFormat }} - "--log-format={{ .Values.logFormat }}" {{- end }} + {{- if .Values.dockerGID }} + - "--docker-gid={{ .Values.dockerGID }}" + {{- end }} command: - "/manager" env: diff --git a/charts/actions-runner-controller/values.yaml b/charts/actions-runner-controller/values.yaml index 9d4dab840e..97d0eb7437 100644 --- a/charts/actions-runner-controller/values.yaml +++ b/charts/actions-runner-controller/values.yaml @@ -192,6 +192,10 @@ admissionWebHooks: ## specify log format for actions runner controller. 
Valid options are "text" and "json" logFormat: text +# enable setting the docker group id for the runner container +# https://github.com/actions/actions-runner-controller/pull/2499 +#dockerGID: 121 + githubWebhookServer: enabled: false replicaCount: 1 From f7201fbb4d60029999073c7a8dd371e8a859d457 Mon Sep 17 00:00:00 2001 From: Daniel Hobley Date: Sat, 27 May 2023 04:47:23 +0200 Subject: [PATCH 223/561] feat: allow for modifying `var-run` mount maximum size limit (#2624) --- TROUBLESHOOTING.md | 24 +++++++++++++++++++ .../v1alpha1/runner_types.go | 2 ++ .../v1alpha1/zz_generated.deepcopy.go | 5 ++++ ...ions.summerwind.dev_runnerdeployments.yaml | 6 +++++ ...ions.summerwind.dev_runnerreplicasets.yaml | 6 +++++ .../crds/actions.summerwind.dev_runners.yaml | 6 +++++ .../actions.summerwind.dev_runnersets.yaml | 6 +++++ ...ions.summerwind.dev_runnerdeployments.yaml | 6 +++++ ...ions.summerwind.dev_runnerreplicasets.yaml | 6 +++++ .../bases/actions.summerwind.dev_runners.yaml | 6 +++++ .../actions.summerwind.dev_runnersets.yaml | 6 +++++ .../runner_controller.go | 9 +++++-- 12 files changed, 86 insertions(+), 2 deletions(-) diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md index 2e5c92c245..3807005134 100644 --- a/TROUBLESHOOTING.md +++ b/TROUBLESHOOTING.md @@ -304,3 +304,27 @@ If you noticed that it takes several minutes for sidecar dind container to be cr **Solution** The solution is to switch to using faster storage, if you are experiencing this issue you are probably using HDD storage. Switching to SSD storage fixed the problem in my case. Most cloud providers have a list of storage options to use just pick something faster that your current disk, for on prem clusters you will need to invest in some SSDs. + +### Dockerd no space left on device + +**Problem** + +If you are running many containers on your runner you might encounter an issue where docker daemon is unable to start new containers and you see error `no space left on device`. 
+ +**Solution** + +Add a `dockerVarRunVolumeSizeLimit` key in your runner's spec with a higher size limit (the default is 1M) For instance: + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: github-runner + namespace: github-system +spec: + replicas: 6 + template: + spec: + dockerVarRunVolumeSizeLimit: 50M + env: [] +``` \ No newline at end of file diff --git a/apis/actions.summerwind.net/v1alpha1/runner_types.go b/apis/actions.summerwind.net/v1alpha1/runner_types.go index 7986efad90..ca62238e58 100644 --- a/apis/actions.summerwind.net/v1alpha1/runner_types.go +++ b/apis/actions.summerwind.net/v1alpha1/runner_types.go @@ -70,6 +70,8 @@ type RunnerConfig struct { // +optional DockerRegistryMirror *string `json:"dockerRegistryMirror,omitempty"` // +optional + DockerVarRunVolumeSizeLimit *resource.Quantity `json:"dockerVarRunVolumeSizeLimit,omitempty"` + // +optional VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"` // +optional VolumeStorageMedium *string `json:"volumeStorageMedium,omitempty"` diff --git a/apis/actions.summerwind.net/v1alpha1/zz_generated.deepcopy.go b/apis/actions.summerwind.net/v1alpha1/zz_generated.deepcopy.go index bc450b7c11..ac21ce4927 100644 --- a/apis/actions.summerwind.net/v1alpha1/zz_generated.deepcopy.go +++ b/apis/actions.summerwind.net/v1alpha1/zz_generated.deepcopy.go @@ -436,6 +436,11 @@ func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) { *out = new(string) **out = **in } + if in.DockerVarRunVolumeSizeLimit != nil { + in, out := &in.DockerVarRunVolumeSizeLimit, &out.DockerVarRunVolumeSizeLimit + x := (*in).DeepCopy() + *out = &x + } if in.VolumeSizeLimit != nil { in, out := &in.VolumeSizeLimit, &out.VolumeSizeLimit x := (*in).DeepCopy() diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml index cb5f54ae55..4291b6b618 100644 --- 
a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerdeployments.yaml @@ -1497,6 +1497,12 @@ spec: type: integer dockerRegistryMirror: type: string + dockerVarRunVolumeSizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true dockerVolumeMounts: items: description: VolumeMount describes a mounting of a Volume within a container. diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerreplicasets.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerreplicasets.yaml index a2e6aba749..877bd518b9 100644 --- a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerreplicasets.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnerreplicasets.yaml @@ -1479,6 +1479,12 @@ spec: type: integer dockerRegistryMirror: type: string + dockerVarRunVolumeSizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true dockerVolumeMounts: items: description: VolumeMount describes a mounting of a Volume within a container. 
diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml index beaea51b0a..c688cc6d47 100644 --- a/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_runners.yaml @@ -1432,6 +1432,12 @@ spec: type: integer dockerRegistryMirror: type: string + dockerVarRunVolumeSizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true dockerVolumeMounts: items: description: VolumeMount describes a mounting of a Volume within a container. diff --git a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnersets.yaml b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnersets.yaml index 7cdb81561d..1140928142 100644 --- a/charts/actions-runner-controller/crds/actions.summerwind.dev_runnersets.yaml +++ b/charts/actions-runner-controller/crds/actions.summerwind.dev_runnersets.yaml @@ -55,6 +55,12 @@ spec: type: integer dockerRegistryMirror: type: string + dockerVarRunVolumeSizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true dockerdWithinRunnerContainer: type: boolean effectiveTime: diff --git a/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml b/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml index cb5f54ae55..4291b6b618 100644 --- a/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml +++ b/config/crd/bases/actions.summerwind.dev_runnerdeployments.yaml @@ -1497,6 +1497,12 @@ spec: type: integer dockerRegistryMirror: type: string + dockerVarRunVolumeSizeLimit: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true dockerVolumeMounts: items: description: VolumeMount describes a mounting of a Volume within a container. diff --git a/config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml b/config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml index a2e6aba749..877bd518b9 100644 --- a/config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml +++ b/config/crd/bases/actions.summerwind.dev_runnerreplicasets.yaml @@ -1479,6 +1479,12 @@ spec: type: integer dockerRegistryMirror: type: string + dockerVarRunVolumeSizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true dockerVolumeMounts: items: description: VolumeMount describes a mounting of a Volume within a container. diff --git a/config/crd/bases/actions.summerwind.dev_runners.yaml b/config/crd/bases/actions.summerwind.dev_runners.yaml index beaea51b0a..c688cc6d47 100644 --- a/config/crd/bases/actions.summerwind.dev_runners.yaml +++ b/config/crd/bases/actions.summerwind.dev_runners.yaml @@ -1432,6 +1432,12 @@ spec: type: integer dockerRegistryMirror: type: string + dockerVarRunVolumeSizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true dockerVolumeMounts: items: description: VolumeMount describes a mounting of a Volume within a container. 
diff --git a/config/crd/bases/actions.summerwind.dev_runnersets.yaml b/config/crd/bases/actions.summerwind.dev_runnersets.yaml index 7cdb81561d..1140928142 100644 --- a/config/crd/bases/actions.summerwind.dev_runnersets.yaml +++ b/config/crd/bases/actions.summerwind.dev_runnersets.yaml @@ -55,6 +55,12 @@ spec: type: integer dockerRegistryMirror: type: string + dockerVarRunVolumeSizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true dockerdWithinRunnerContainer: type: boolean effectiveTime: diff --git a/controllers/actions.summerwind.net/runner_controller.go b/controllers/actions.summerwind.net/runner_controller.go index 574d08aa91..a711fd8c71 100644 --- a/controllers/actions.summerwind.net/runner_controller.go +++ b/controllers/actions.summerwind.net/runner_controller.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "k8s.io/apimachinery/pkg/api/resource" "reflect" "strconv" "strings" @@ -30,7 +31,6 @@ import ( "github.com/go-logr/logr" kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" @@ -810,6 +810,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru dockerRegistryMirror = *runnerSpec.DockerRegistryMirror } + if runnerSpec.DockerVarRunVolumeSizeLimit == nil { + runnerSpec.DockerVarRunVolumeSizeLimit = resource.NewScaledQuantity(1, resource.Mega) + + } + // Be aware some of the environment variables are used // in the runner entrypoint script env := []corev1.EnvVar{ @@ -1080,7 +1085,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{ Medium: corev1.StorageMediumMemory, - SizeLimit: resource.NewScaledQuantity(1, 
resource.Mega), + SizeLimit: runnerSpec.DockerVarRunVolumeSizeLimit, }, }, }, From 872039225e05a874f1804153ab7323bcad603b46 Mon Sep 17 00:00:00 2001 From: Thang Le Date: Sun, 28 May 2023 08:36:55 +0100 Subject: [PATCH 224/561] Use head_branch metric (#2549) Co-authored-by: Yusuke Kuoka --- .../actions.summerwind.net/autoscaling.go | 2 +- .../horizontal_runner_autoscaler_webhook.go | 2 +- ...rizontal_runner_autoscaler_webhook_test.go | 2 +- .../integration_test.go | 2 +- .../runner_graceful_stop.go | 2 +- github/fake/runners.go | 2 +- github/github.go | 2 +- github/github_test.go | 2 +- go.mod | 17 +++---- go.sum | 44 +++++++++---------- pkg/actionsmetrics/event_reader.go | 8 +++- pkg/actionsmetrics/metrics.go | 2 +- pkg/actionsmetrics/webhookserver.go | 2 +- .../githubwebhookdelivery.go | 2 +- pkg/hookdeliveryforwarder/forwarder.go | 2 +- pkg/hookdeliveryforwarder/hooks.go | 2 +- pkg/hookdeliveryforwarder/hooks_deliveries.go | 2 +- pkg/hookdeliveryforwarder/multiforwarder.go | 2 +- simulator/runnergroups.go | 2 +- test/e2e/e2e_test.go | 2 +- 20 files changed, 54 insertions(+), 49 deletions(-) diff --git a/controllers/actions.summerwind.net/autoscaling.go b/controllers/actions.summerwind.net/autoscaling.go index 906bdce2c7..ab6230511a 100644 --- a/controllers/actions.summerwind.net/autoscaling.go +++ b/controllers/actions.summerwind.net/autoscaling.go @@ -11,7 +11,7 @@ import ( "github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1" prometheus_metrics "github.com/actions/actions-runner-controller/controllers/actions.summerwind.net/metrics" arcgithub "github.com/actions/actions-runner-controller/github" - "github.com/google/go-github/v47/github" + "github.com/google/go-github/v52/github" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go index 
09013c7f58..85c4bc4829 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/go-logr/logr" - gogithub "github.com/google/go-github/v47/github" + gogithub "github.com/google/go-github/v52/github" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go index 20f81f17f2..fd7e0890eb 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go @@ -14,7 +14,7 @@ import ( actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1" "github.com/go-logr/logr" - "github.com/google/go-github/v47/github" + "github.com/google/go-github/v52/github" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" diff --git a/controllers/actions.summerwind.net/integration_test.go b/controllers/actions.summerwind.net/integration_test.go index 2dc34ca7b4..47a97f303b 100644 --- a/controllers/actions.summerwind.net/integration_test.go +++ b/controllers/actions.summerwind.net/integration_test.go @@ -8,7 +8,7 @@ import ( "time" github2 "github.com/actions/actions-runner-controller/github" - "github.com/google/go-github/v47/github" + "github.com/google/go-github/v52/github" "github.com/actions/actions-runner-controller/github/fake" diff --git a/controllers/actions.summerwind.net/runner_graceful_stop.go b/controllers/actions.summerwind.net/runner_graceful_stop.go index a3cdb43f41..623dd4b6c5 100644 --- 
a/controllers/actions.summerwind.net/runner_graceful_stop.go +++ b/controllers/actions.summerwind.net/runner_graceful_stop.go @@ -9,7 +9,7 @@ import ( "github.com/actions/actions-runner-controller/github" "github.com/go-logr/logr" - gogithub "github.com/google/go-github/v47/github" + gogithub "github.com/google/go-github/v52/github" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" diff --git a/github/fake/runners.go b/github/fake/runners.go index 094a5606ce..e9355cc1fc 100644 --- a/github/fake/runners.go +++ b/github/fake/runners.go @@ -8,7 +8,7 @@ import ( "github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1" - "github.com/google/go-github/v47/github" + "github.com/google/go-github/v52/github" "github.com/gorilla/mux" ) diff --git a/github/github.go b/github/github.go index f5d710d330..13ef7a6c92 100644 --- a/github/github.go +++ b/github/github.go @@ -15,7 +15,7 @@ import ( "github.com/actions/actions-runner-controller/logging" "github.com/bradleyfalzon/ghinstallation/v2" "github.com/go-logr/logr" - "github.com/google/go-github/v47/github" + "github.com/google/go-github/v52/github" "github.com/gregjones/httpcache" "golang.org/x/oauth2" ) diff --git a/github/github_test.go b/github/github_test.go index 8fa22164a5..a581b45edf 100644 --- a/github/github_test.go +++ b/github/github_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/actions/actions-runner-controller/github/fake" - "github.com/google/go-github/v47/github" + "github.com/google/go-github/v52/github" ) var server *httptest.Server diff --git a/go.mod b/go.mod index 350b8809f9..8d00b1e8a1 100644 --- a/go.mod +++ b/go.mod @@ -9,8 +9,7 @@ require ( github.com/go-logr/logr v1.2.3 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/google/go-cmp v0.5.9 - github.com/google/go-github/v47 v47.1.0 - github.com/google/go-github/v50 v50.0.0 + github.com/google/go-github/v52 v52.0.0 github.com/google/uuid v1.3.0 
github.com/gorilla/mux v1.8.0 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 @@ -26,8 +25,8 @@ require ( github.com/teambition/rrule-go v1.8.2 go.uber.org/multierr v1.7.0 go.uber.org/zap v1.24.0 - golang.org/x/net v0.8.0 - golang.org/x/oauth2 v0.1.0 + golang.org/x/net v0.9.0 + golang.org/x/oauth2 v0.7.0 golang.org/x/sync v0.1.0 gomodules.xyz/jsonpatch/v2 v2.2.0 gopkg.in/yaml.v2 v2.4.0 @@ -39,10 +38,12 @@ require ( ) require ( + github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/aws/aws-sdk-go v1.44.122 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cloudflare/circl v1.1.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect @@ -89,10 +90,10 @@ require ( github.com/stretchr/objx v0.5.0 // indirect github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/crypto v0.1.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/crypto v0.7.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/term v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.6.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 5aa01e1523..cdab23f83f 100644 --- a/go.sum +++ b/go.sum @@ -33,6 +33,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb 
v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI= github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -41,8 +43,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/aws/aws-sdk-go v1.40.56 h1:FM2yjR0UUYFzDTMx+mH9Vyw1k1EUUxsAFzk+BjkzANA= -github.com/aws/aws-sdk-go v1.40.56/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -56,6 +56,7 @@ github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8 github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 h1:5+NghM1Zred9Z078QEZtm28G/kfDfZN/92gkDlLwGVA= github.com/bradleyfalzon/ghinstallation/v2 v2.1.0/go.mod h1:Xg3xPRN5Mcq6GDqeUVhFbjEWMb4JHCyWEeeBGEYQoTU= +github.com/bwesterb/go-ristretto 
v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -64,6 +65,8 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= +github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= @@ -180,10 +183,8 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI= github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28= -github.com/google/go-github/v47 v47.1.0 h1:Cacm/WxQBOa9lF0FT0EMjZ2BWMetQ1TQfyurn4yF1z8= -github.com/google/go-github/v47 v47.1.0/go.mod h1:VPZBXNbFSJGjyjFRUKo9vZGawTajnWzC/YjGw/oFKi0= -github.com/google/go-github/v50 v50.0.0 h1:gdO1AeuSZZK4iYWwVbjni7zg8PIQhp7QfmPunr016Jk= -github.com/google/go-github/v50 v50.0.0/go.mod 
h1:Ev4Tre8QoKiolvbpOSG3FIi4Mlon3S2Nt9W5JYqKiwA= +github.com/google/go-github/v52 v52.0.0 h1:uyGWOY+jMQ8GVGSX8dkSwCzlehU3WfdxQ7GweO/JP7M= +github.com/google/go-github/v52 v52.0.0/go.mod h1:WJV6VEEUPuMo5pXqqa2ZCZEdbQqua4zAk2MZTIo+m+4= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -210,8 +211,6 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro= github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78= -github.com/gruntwork-io/terratest v0.41.11 h1:EAHiK6PFWJCVkgW2yUompjSsZQzA0CfBcuqIaXtZdpk= -github.com/gruntwork-io/terratest v0.41.11/go.mod h1:qH1xkPTTGx30XkMHw8jAVIbzqheSjIa5IyiTwSV2vKI= github.com/gruntwork-io/terratest v0.41.24 h1:j6T6qe4deVvynTG2UmnjGwZy83he6xKgTaYWiSdFv/w= github.com/gruntwork-io/terratest v0.41.24/go.mod h1:O6gajNBjO1wvc7Wl9WtbO+ORcdnhAV2GQiBE71ycwIk= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -393,8 +392,9 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -458,11 +458,10 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -470,10 +469,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod 
h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= -golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -530,15 +527,16 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -546,8 +544,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/pkg/actionsmetrics/event_reader.go b/pkg/actionsmetrics/event_reader.go index 12b5f9b737..606891fe5e 100644 --- a/pkg/actionsmetrics/event_reader.go +++ b/pkg/actionsmetrics/event_reader.go @@ -10,7 +10,7 @@ import ( "time" "github.com/go-logr/logr" - gogithub "github.com/google/go-github/v50/github" + gogithub "github.com/google/go-github/v52/github" "github.com/prometheus/client_golang/prometheus" "github.com/actions/actions-runner-controller/github" @@ -98,13 +98,19 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in labels["organization"] = org var wn string + var hb string if e.WorkflowJob != nil { if n := e.WorkflowJob.WorkflowName; n != nil { wn = *n keysAndValues = append(keysAndValues, "workflow_name", *n) } + if n := e.WorkflowJob.HeadBranch; n != nil { + hb = *n + keysAndValues = append(keysAndValues, "head_branch", *n) + } } labels["workflow_name"] = wn + labels["head_branch"] = hb log := reader.Log.WithValues(keysAndValues...) 
diff --git a/pkg/actionsmetrics/metrics.go b/pkg/actionsmetrics/metrics.go index 1c0deb1b65..96619f37ee 100644 --- a/pkg/actionsmetrics/metrics.go +++ b/pkg/actionsmetrics/metrics.go @@ -76,7 +76,7 @@ func metricLabels(extras ...string) []string { } var ( - commonLabels = []string{"runs_on", "job_name", "organization", "repository", "repository_full_name", "owner", "workflow_name"} + commonLabels = []string{"runs_on", "job_name", "organization", "repository", "repository_full_name", "owner", "workflow_name", "head_branch"} githubWorkflowJobQueueDurationSeconds = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "github_workflow_job_queue_duration_seconds", diff --git a/pkg/actionsmetrics/webhookserver.go b/pkg/actionsmetrics/webhookserver.go index 7d4cb8949d..a102b0dff0 100644 --- a/pkg/actionsmetrics/webhookserver.go +++ b/pkg/actionsmetrics/webhookserver.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/go-logr/logr" - gogithub "github.com/google/go-github/v50/github" + gogithub "github.com/google/go-github/v52/github" ctrl "sigs.k8s.io/controller-runtime" "github.com/actions/actions-runner-controller/github" diff --git a/pkg/githubwebhookdeliveryforwarder/githubwebhookdelivery.go b/pkg/githubwebhookdeliveryforwarder/githubwebhookdelivery.go index 3b77bcdda4..829cda8251 100644 --- a/pkg/githubwebhookdeliveryforwarder/githubwebhookdelivery.go +++ b/pkg/githubwebhookdeliveryforwarder/githubwebhookdelivery.go @@ -11,7 +11,7 @@ import ( "time" "github.com/actions/actions-runner-controller/github" - gogithub "github.com/google/go-github/v47/github" + gogithub "github.com/google/go-github/v52/github" ) type server struct { diff --git a/pkg/hookdeliveryforwarder/forwarder.go b/pkg/hookdeliveryforwarder/forwarder.go index 3a2d946a2c..1b62ff5f5d 100644 --- a/pkg/hookdeliveryforwarder/forwarder.go +++ b/pkg/hookdeliveryforwarder/forwarder.go @@ -12,7 +12,7 @@ import ( "time" 
"github.com/actions/actions-runner-controller/github" - gogithub "github.com/google/go-github/v47/github" + gogithub "github.com/google/go-github/v52/github" ) type Forwarder struct { diff --git a/pkg/hookdeliveryforwarder/hooks.go b/pkg/hookdeliveryforwarder/hooks.go index f603913848..e2f160cbee 100644 --- a/pkg/hookdeliveryforwarder/hooks.go +++ b/pkg/hookdeliveryforwarder/hooks.go @@ -3,7 +3,7 @@ package hookdeliveryforwarder import ( "context" - gogithub "github.com/google/go-github/v47/github" + gogithub "github.com/google/go-github/v52/github" ) type hooksAPI struct { diff --git a/pkg/hookdeliveryforwarder/hooks_deliveries.go b/pkg/hookdeliveryforwarder/hooks_deliveries.go index 40774f8025..3cce537a24 100644 --- a/pkg/hookdeliveryforwarder/hooks_deliveries.go +++ b/pkg/hookdeliveryforwarder/hooks_deliveries.go @@ -3,7 +3,7 @@ package hookdeliveryforwarder import ( "context" - gogithub "github.com/google/go-github/v47/github" + gogithub "github.com/google/go-github/v52/github" ) type hookDeliveriesAPI struct { diff --git a/pkg/hookdeliveryforwarder/multiforwarder.go b/pkg/hookdeliveryforwarder/multiforwarder.go index 3f3e3a0589..6b3609bd07 100644 --- a/pkg/hookdeliveryforwarder/multiforwarder.go +++ b/pkg/hookdeliveryforwarder/multiforwarder.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/actions/actions-runner-controller/github" - gogithub "github.com/google/go-github/v47/github" + gogithub "github.com/google/go-github/v52/github" ) type MultiForwarder struct { diff --git a/simulator/runnergroups.go b/simulator/runnergroups.go index d67744c006..4f1a84fd76 100644 --- a/simulator/runnergroups.go +++ b/simulator/runnergroups.go @@ -5,7 +5,7 @@ import ( "sort" "strings" - "github.com/google/go-github/v47/github" + "github.com/google/go-github/v52/github" ) type RunnerGroupScope int diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 91038ee495..87df15124d 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -12,7 +12,7 @@ import ( "time" 
"github.com/actions/actions-runner-controller/testing" - "github.com/google/go-github/v47/github" + "github.com/google/go-github/v52/github" "github.com/onsi/gomega" "github.com/stretchr/testify/require" "golang.org/x/oauth2" From 96a712f274b2463f567e3a51a74431345acaec2e Mon Sep 17 00:00:00 2001 From: Nuru Date: Mon, 29 May 2023 17:04:57 -0700 Subject: [PATCH 225/561] Update unconsumed HRA capacity reservation's expiration more frequently and consistently (#2502) Co-authored-by: Yusuke Kuoka --- ...orizontal_runner_autoscaler_batch_scale.go | 185 +++++++++++------- ...ntal_runner_autoscaler_batch_scale_test.go | 166 ++++++++++++++++ ...rizontal_runner_autoscaler_webhook_test.go | 5 + docs/automatically-scaling-runners.md | 40 +++- 4 files changed, 313 insertions(+), 83 deletions(-) create mode 100644 controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale_test.go diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go index 236317de17..8f537005eb 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale.go @@ -44,8 +44,8 @@ type scaleOperation struct { // Add the scale target to the unbounded queue, blocking until the target is successfully added to the queue. // All the targets in the queue are dequeued every 3 seconds, grouped by the HRA, and applied. 
-// In a happy path, batchScaler update each HRA only once, even though the HRA had two or more associated webhook events in the 3 seconds interval, -// which results in less K8s API calls and less HRA update conflicts in case your ARC installation receives a lot of webhook events +// In a happy path, batchScaler updates each HRA only once, even though the HRA had two or more associated webhook events in the 3 seconds interval, +// which results in fewer K8s API calls and fewer HRA update conflicts in case your ARC installation receives a lot of webhook events func (s *batchScaler) Add(st *ScaleTarget) { if st == nil { return @@ -142,87 +142,130 @@ func (s *batchScaler) batchScale(ctx context.Context, batch batchScaleOperation) return err } - copy := hra.DeepCopy() + now := time.Now() - copy.Spec.CapacityReservations = getValidCapacityReservations(copy) + copy, err := s.planBatchScale(ctx, batch, &hra, now) + if err != nil { + return err + } - var added, completed int + if err := s.Client.Patch(ctx, copy, client.MergeFrom(&hra)); err != nil { + return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err) + } - for _, scale := range batch.scaleOps { - amount := 1 + return nil +} + +func (s *batchScaler) planBatchScale(ctx context.Context, batch batchScaleOperation, hra *v1alpha1.HorizontalRunnerAutoscaler, now time.Time) (*v1alpha1.HorizontalRunnerAutoscaler, error) { + copy := hra.DeepCopy() - if scale.trigger.Amount != 0 { - amount = scale.trigger.Amount + if hra.Spec.MaxReplicas != nil && len(copy.Spec.CapacityReservations) > *copy.Spec.MaxReplicas { + // We have more reservations than MaxReplicas, meaning that we previously + // could not scale up to meet a capacity demand because we had hit MaxReplicas. + // Therefore, there are reservations that are starved for capacity. 
We extend the + // expiration time on these starved reservations because the "duration" is meant + // to apply to reservations that have launched replicas, not replicas in the backlog. + // Of course, if MaxReplicas is nil, then there is no max to hit, and we do not need this adjustment. + // See https://github.com/actions/actions-runner-controller/issues/2254 for more context. + + // Extend the expiration time of all the reservations not yet assigned to replicas. + // + // Note that we assume that the two scenarios equivalent here. + // The first case is where the number of reservations become greater than MaxReplicas. + // The second case is where MaxReplicas become greater than the number of reservations equivalent. + // Presuming the HRA.spec.scaleTriggers[].duration as "the duration until the reservation expires after a corresponding runner was deployed", + // it's correct. + // + // In other words, we settle on a capacity reservation's ExpirationTime only after the corresponding runner is "about to be" deployed. + // It's "about to be deployed" not "deployed" because we have no way to correlate a capacity reservation and the runner; + // the best we can do here is to simulate the desired behavior by reading MaxReplicas and assuming it will be equal to the number of active runners soon. + // + // Perhaps we could use RunnerDeployment.Status.Replicas or RunnerSet.Status.Replicas instead of the MaxReplicas as a better source of "the number of active runners". + // However, note that the status is not guaranteed to be up-to-date. + // It might not be that easy to decide which is better to use. + for i := *hra.Spec.MaxReplicas; i < len(copy.Spec.CapacityReservations); i++ { + // Let's say maxReplicas=3 and the workflow job of status=completed result in deleting the first capacity reservation + // copy.Spec.CapacityReservations[i] where i=0. 
+ // We are interested in at least four reservations and runners: + // i=0 - already included in the current desired replicas, but may be about to be deleted + // i=1-2 - already included in the current desired replicas + // i=3 - not yet included in the current desired replicas, might have been expired while waiting in the queue + // + // i=3 is especially important here- If we didn't reset the expiration time of this reservation, + // it might expire before it is assigned to a runner, due to the delay between the time the + // expiration timer starts and the time a runner becomes available. + // + // Why is there such delay? Because ARC implements the scale duration and expiration as such. + // The expiration timer starts when the reservation is created, while the runner is created only after + // the corresponding reservation fits within maxReplicas. + // + // We address that, by resetting the expiration time for fourth(i=3 in the above example) + // and subsequent reservations whenever a batch is run (which is when expired reservations get deleted). + + // There is no guarantee that all the reservations have the same duration, and even if there were, + // at this point we have lost the reference to the duration that was intended. + // However, we can compute the intended duration from the existing interval. + // + // In other words, updating HRA.spec.scaleTriggers[].duration does not result in delaying capacity reservations expiration any longer + // than the "intended" duration, which is the duration of the trigger when the reservation was created. 
+ duration := copy.Spec.CapacityReservations[i].ExpirationTime.Time.Sub(copy.Spec.CapacityReservations[i].EffectiveTime.Time) + copy.Spec.CapacityReservations[i].EffectiveTime = metav1.Time{Time: now} + copy.Spec.CapacityReservations[i].ExpirationTime = metav1.Time{Time: now.Add(duration)} } + } - scale.log.V(2).Info("Adding capacity reservation", "amount", amount) + // Now we can filter out any expired reservations from consideration. + // This could leave us with 0 reservations left. + copy.Spec.CapacityReservations = getValidCapacityReservations(copy) + before := len(hra.Spec.CapacityReservations) + expired := before - len(copy.Spec.CapacityReservations) - now := time.Now() - if amount > 0 { - copy.Spec.CapacityReservations = append(copy.Spec.CapacityReservations, v1alpha1.CapacityReservation{ - EffectiveTime: metav1.Time{Time: now}, - ExpirationTime: metav1.Time{Time: now.Add(scale.trigger.Duration.Duration)}, - Replicas: amount, - }) + var added, completed int + for _, scale := range batch.scaleOps { + amount := scale.trigger.Amount + + // We do not track if a webhook-based scale-down event matches an expired capacity reservation + // or a job for which the scale-up event was never received. This means that scale-down + // events could drive capacity reservations into the negative numbers if we let it. + // We ensure capacity never falls below zero, but that also means that the + // final number of capacity reservations depends on the order in which events come in. + // If capacity is at zero and we get a scale-down followed by a scale-up, + // the scale-down will be ignored and we will end up with a desired capacity of 1. + // However, if we get the scale-up first, the scale-down will drive desired capacity back to zero. + // This could be fixed by matching events' `workflow_job.run_id` with capacity reservations, + // but that would be a lot of work. So for now we allow for some slop, and hope that + // GitHub provides a better autoscaling solution soon. 
+ if amount > 0 { + scale.log.V(2).Info("Adding capacity reservation", "amount", amount) + + // Parts of this function require that Spec.CapacityReservations.Replicas always equals 1. + // Enforce that rule no matter what the `amount` value is + for i := 0; i < amount; i++ { + copy.Spec.CapacityReservations = append(copy.Spec.CapacityReservations, v1alpha1.CapacityReservation{ + EffectiveTime: metav1.Time{Time: now}, + ExpirationTime: metav1.Time{Time: now.Add(scale.trigger.Duration.Duration)}, + Replicas: 1, + }) + } added += amount } else if amount < 0 { - var reservations []v1alpha1.CapacityReservation - - var ( - found bool - foundIdx int - ) - - for i, r := range copy.Spec.CapacityReservations { - r := r - if !found && r.Replicas+amount == 0 { - found = true - foundIdx = i - } else { - // Note that we nil-check max replicas because this "fix" is needed only when there is the upper limit of runners. - // In other words, you don't need to reset effective time and expiration time when there is no max replicas. - // That's because the desired replicas would already contain the reservation since it's creation. - if found && copy.Spec.MaxReplicas != nil && i > foundIdx+*copy.Spec.MaxReplicas { - // Update newer CapacityReservations' time to now to trigger reconcile - // Without this, we might stuck in minReplicas unnecessarily long. - // That is, we might not scale up after an ephemeral runner has been deleted - // until a new scale up, all runners finish, or after DefaultRunnerPodRecreationDelayAfterWebhookScale - // See https://github.com/actions/actions-runner-controller/issues/2254 for more context. - r.EffectiveTime = metav1.Time{Time: now} - - // We also reset the scale trigger expiration time, so that you don't need to tweak - // scale trigger duratoin depending on maxReplicas. - // A detailed explanation follows. - // - // Let's say maxReplicas=3 and the workflow job of status=canceled result in deleting the first capacity reservation hence i=0. 
- // We are interested in at least four reservations and runners: - // i=0 - already included in the current desired replicas, but just got deleted - // i=1-2 - already included in the current desired replicas - // i=3 - not yet included in the current desired replicas, might have been expired while waiting in the queue - // - // i=3 is especially important here- If we didn't reset the expiration time of 3rd reservation, - // it might expire before a corresponding runner is created, due to the delay between the expiration timer starts and the runner is created. - // - // Why is there such delay? Because ARC implements the scale duration and expiration as such... - // The expiration timer starts when the reservation is created, while the runner is created only after the corresponding reservation fits within maxReplicas. - // - // We address that, by resetting the expiration time for fourth(i=3 in the above example) and subsequent reservations when the first reservation gets cancelled. - r.ExpirationTime = metav1.Time{Time: now.Add(scale.trigger.Duration.Duration)} - } + scale.log.V(2).Info("Removing capacity reservation", "amount", -amount) - reservations = append(reservations, r) - } + // Remove the requested number of reservations unless there are not that many left + if len(copy.Spec.CapacityReservations) > -amount { + copy.Spec.CapacityReservations = copy.Spec.CapacityReservations[-amount:] + } else { + copy.Spec.CapacityReservations = nil } - - copy.Spec.CapacityReservations = reservations - - completed += amount + // This "completed" represents the number of completed and therefore removed runners in this batch, + // which is logged later. + // As the amount is negative for a scale-down trigger, we make the "completed" amount positive by negating the amount. + // That way, the user can see the number of removed runners(like 3), rather than the delta (like -3) in the number of runners. 
+ completed -= amount } } - before := len(hra.Spec.CapacityReservations) - expired := before - len(copy.Spec.CapacityReservations) after := len(copy.Spec.CapacityReservations) s.Log.V(1).Info( @@ -234,9 +277,5 @@ func (s *batchScaler) batchScale(ctx context.Context, batch batchScaleOperation) "after", after, ) - if err := s.Client.Patch(ctx, copy, client.MergeFrom(&hra)); err != nil { - return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err) - } - - return nil + return copy, nil } diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale_test.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale_test.go new file mode 100644 index 0000000000..76bea24a0f --- /dev/null +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_batch_scale_test.go @@ -0,0 +1,166 @@ +package actionssummerwindnet + +import ( + "context" + "testing" + "time" + + "github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1" + "github.com/go-logr/logr" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestPlanBatchScale(t *testing.T) { + s := &batchScaler{Log: logr.Discard()} + + var ( + expiry = 10 * time.Second + interval = 3 * time.Second + + t0 = time.Now() + t1 = t0.Add(interval) + t2 = t1.Add(interval) + ) + + check := func(t *testing.T, amount int, newExpiry time.Duration, wantReservations []v1alpha1.CapacityReservation) { + t.Helper() + + var ( + op = batchScaleOperation{ + scaleOps: []scaleOperation{ + { + log: logr.Discard(), + trigger: v1alpha1.ScaleUpTrigger{ + Amount: amount, + Duration: metav1.Duration{Duration: newExpiry}, + }, + }, + }, + } + + hra = &v1alpha1.HorizontalRunnerAutoscaler{ + Spec: v1alpha1.HorizontalRunnerAutoscalerSpec{ + MaxReplicas: intPtr(1), + ScaleUpTriggers: []v1alpha1.ScaleUpTrigger{ + { + Amount: 1, + Duration: metav1.Duration{Duration: newExpiry}, + }, + }, + 
CapacityReservations: []v1alpha1.CapacityReservation{ + { + EffectiveTime: metav1.NewTime(t0), + ExpirationTime: metav1.NewTime(t0.Add(expiry)), + Replicas: 1, + }, + { + EffectiveTime: metav1.NewTime(t1), + ExpirationTime: metav1.NewTime(t1.Add(expiry)), + Replicas: 1, + }, + }, + }, + } + ) + + want := hra.DeepCopy() + + want.Spec.CapacityReservations = wantReservations + + got, err := s.planBatchScale(context.Background(), op, hra, t2) + + require.NoError(t, err) + require.Equal(t, want, got) + } + + t.Run("scale up", func(t *testing.T) { + check(t, 1, expiry, []v1alpha1.CapacityReservation{ + { + // This is kept based on t0 because it falls within maxReplicas + // i.e. the corresponding runner has assumbed to be already deployed. + EffectiveTime: metav1.NewTime(t0), + ExpirationTime: metav1.NewTime(t0.Add(expiry)), + Replicas: 1, + }, + { + // Updated from t1 to t2 due to this exceeded maxReplicas + EffectiveTime: metav1.NewTime(t2), + ExpirationTime: metav1.NewTime(t2.Add(expiry)), + Replicas: 1, + }, + { + // This is based on t2(=now) because it has been added just now. + EffectiveTime: metav1.NewTime(t2), + ExpirationTime: metav1.NewTime(t2.Add(expiry)), + Replicas: 1, + }, + }) + }) + + t.Run("scale up reuses previous scale trigger duration for extension", func(t *testing.T) { + newExpiry := expiry + time.Second + check(t, 1, newExpiry, []v1alpha1.CapacityReservation{ + { + // This is kept based on t0 because it falls within maxReplicas + // i.e. the corresponding runner has assumbed to be already deployed. + EffectiveTime: metav1.NewTime(t0), + ExpirationTime: metav1.NewTime(t0.Add(expiry)), + Replicas: 1, + }, + { + // Updated from t1 to t2 due to this exceeded maxReplicas + EffectiveTime: metav1.NewTime(t2), + ExpirationTime: metav1.NewTime(t2.Add(expiry)), + Replicas: 1, + }, + { + // This is based on t2(=now) because it has been added just now. 
+ EffectiveTime: metav1.NewTime(t2), + ExpirationTime: metav1.NewTime(t2.Add(newExpiry)), + Replicas: 1, + }, + }) + }) + + t.Run("scale down", func(t *testing.T) { + check(t, -1, expiry, []v1alpha1.CapacityReservation{ + { + // Updated from t1 to t2 due to this exceeded maxReplicas + EffectiveTime: metav1.NewTime(t2), + ExpirationTime: metav1.NewTime(t2.Add(expiry)), + Replicas: 1, + }, + }) + }) + + t.Run("scale down is not affected by new scale trigger duration", func(t *testing.T) { + check(t, -1, expiry+time.Second, []v1alpha1.CapacityReservation{ + { + // Updated from t1 to t2 due to this exceeded maxReplicas + EffectiveTime: metav1.NewTime(t2), + ExpirationTime: metav1.NewTime(t2.Add(expiry)), + Replicas: 1, + }, + }) + }) + + // TODO: Keep refreshing the expiry date even when there are no other scale down/up triggers before the expiration + t.Run("extension", func(t *testing.T) { + check(t, 0, expiry, []v1alpha1.CapacityReservation{ + { + // This is kept based on t0 because it falls within maxReplicas + // i.e. the corresponding runner has assumbed to be already deployed. 
+ EffectiveTime: metav1.NewTime(t0), + ExpirationTime: metav1.NewTime(t0.Add(expiry)), + Replicas: 1, + }, + { + // Updated from t1 to t2 due to this exceeded maxReplicas + EffectiveTime: metav1.NewTime(t2), + ExpirationTime: metav1.NewTime(t2.Add(expiry)), + Replicas: 1, + }, + }) + }) +} diff --git a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go index fd7e0890eb..22ef62b83c 100644 --- a/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go +++ b/controllers/actions.summerwind.net/horizontal_runner_autoscaler_webhook_test.go @@ -376,19 +376,24 @@ func TestGetRequest(t *testing.T) { func TestGetValidCapacityReservations(t *testing.T) { now := time.Now() + duration, _ := time.ParseDuration("10m") + effectiveTime := now.Add(-duration) hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{ Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{ CapacityReservations: []actionsv1alpha1.CapacityReservation{ { + EffectiveTime: metav1.Time{Time: effectiveTime.Add(-time.Second)}, ExpirationTime: metav1.Time{Time: now.Add(-time.Second)}, Replicas: 1, }, { + EffectiveTime: metav1.Time{Time: effectiveTime}, ExpirationTime: metav1.Time{Time: now}, Replicas: 2, }, { + EffectiveTime: metav1.Time{Time: effectiveTime.Add(time.Second)}, ExpirationTime: metav1.Time{Time: now.Add(time.Second)}, Replicas: 3, }, diff --git a/docs/automatically-scaling-runners.md b/docs/automatically-scaling-runners.md index 5af157d71c..1049e87a30 100644 --- a/docs/automatically-scaling-runners.md +++ b/docs/automatically-scaling-runners.md @@ -224,34 +224,54 @@ spec: duration: "30m" ``` -The lifecycle of a runner provisioned from a webhook is different to a runner provisioned from the pull based scaling method: +With the `workflowJob` trigger, each event adds or subtracts a single runner. the `scaleUpTriggers.amount` field is ignored. 
+ +The `duration` field is there because event delivery is not guaranteed. If a scale-up event is received, but the corresponding +scale-down event is not, then the extra runner would be left running forever if there were not some clean-up mechanism. +The `duration` field sets the maximum amount of time to wait for a scale-down event. Scale-down happens at the +earlier of receiving the scale-down event or the expiration of `duration` after the scale-up event is processed and +the scale-up itself is initiated. + +The lifecycle of a runner provisioned from a webhook is different from that of a runner provisioned from the pull based scaling method: 1. GitHub sends a `workflow_job` event to ARC with `status=queued` -2. ARC finds a HRA with a `workflow_job` webhook scale trigger that backs a RunnerDeployment / RunnerSet with matching runner labels -3. The matched HRA adds a unit to its `capacityReservations` list -4. ARC adds a replica and sets the EffectiveTime of that replica to current + `HRA.spec.scaleUpTriggers[].duration` +2. ARC finds the HRA with a `workflow_job` webhook scale trigger that backs a RunnerDeployment / RunnerSet with matching runner labels. (If it finds more than one match, the event is ignored.) +3. The matched HRA adds a `capacityReservation` to its list and sets it to expire at current time + `HRA.spec.scaleUpTriggers[].duration` +4. If there are fewer replicas running than `maxReplicas`, HRA adds a replica and sets the EffectiveTime of that replica to the current time -At this point there are a few things that can happen, either the job gets allocated to the runner or the runner is left dangling due to it not being used, if the runner gets assigned the job that triggered the scale up the lifecycle looks like this: +At this point there are a few things that can happen: +1. Due to idle runners already being available, the job is assigned to one of them and the new runner is left dangling due to it not being used +2. 
The job gets allocated to the runner just launched +3. If there are already `maxReplicas` replicas running, the job waits for its `capacityReservation` to be assigned to one of them + +If the runner gets assigned the job that triggered the scale up, the lifecycle looks like this: 1. The new runner gets allocated the job and processes it 2. Upon the job ending GitHub sends another `workflow_job` event to ARC but with `status=completed` 3. The HRA removes the oldest capacity reservation from its `capacityReservations` and picks a runner to terminate ensuring it isn't busy via the GitHub API beforehand +If the job has to wait for a runner because there are already `maxReplicas` replicas running, the lifecycle looks like this: +1. A `capacityReservation` is added to the list, but no scale-up happens because that would exceed `maxReplicas` +2. When one of the existing runners finishes a job, GitHub sends another `workflow_job` event to ARC but with `status=completed` (or `status=canceled` if the job was cancelled) +3. The HRA removes the oldest capacity reservation from its `capacityReservations`, the oldest waiting `capacityReservation` becomes active, and its `duration` timer starts +4. GitHub assigns a waiting job to the newly available runner + If the job is cancelled before it is allocated to a runner then the lifecycle looks like this: 1. Upon the job cancellation GitHub sends another `workflow_job` event to ARC but with `status=cancelled` 2. The HRA removes the oldest capacity reservation from its `capacityReservations` and picks a runner to terminate ensuring it isn't busy via the GitHub API beforehand -If runner is never used due to other runners matching needed runner group and required runner labels are allocated the job then the lifecycle looks like this: +If the `status=completed` or `status=cancelled` is never delivered to ARC (which happens occasionally) then the lifecycle looks like this: 1. 
The scale trigger duration specified via `HRA.spec.scaleUpTriggers[].duration` elapses -2. The HRA thinks the capacity reservation is expired, removes it from HRA's `capacityReservations` and terminates the expired runner ensuring it isn't busy via the GitHub API beforehand +2. The HRA notices that the capacity reservation has expired, removes it from HRA's `capacityReservation` list and (unless there are `maxReplicas` running and jobs waiting) terminates the expired runner ensuring it isn't busy via the GitHub API beforehand Your `HRA.spec.scaleUpTriggers[].duration` value should be set long enough to account for the following things: -1. the potential amount of time it could take for a pod to become `Running` e.g. you need to scale horizontally because there isn't a node avaliable -2. the amount of time it takes for GitHub to allocate a job to that runner -3. the amount of time it takes for the runner to notice the allocated job and starts running it +1. The potential amount of time it could take for a pod to become `Running` e.g. you need to scale horizontally because there isn't a node available + +2. The amount of time it takes for GitHub to allocate a job to that runner + +3. The amount of time it takes for the runner to notice the allocated job and starts running it + +4. 
The length of time it takes for the runner to complete the job ### Install with Helm From d942ce00e8dd1545dfa18a15b9888a3e9c9567b5 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Fri, 9 Jun 2023 20:57:20 +0200 Subject: [PATCH 226/561] Trim repo/org/enterprise to 63 characters in label values (#2657) --- .../actions.github.com/resourcebuilder.go | 47 +++++++------ .../resourcebuilder_test.go | 69 +++++++++++++++++++ 2 files changed, 96 insertions(+), 20 deletions(-) diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index 4a5bccc46b..5be1163bb6 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -63,26 +63,24 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1. effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners } - githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl) - if err != nil { - return nil, fmt.Errorf("failed to parse github config from url: %v", err) + labels := map[string]string{ + LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, + LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name, + LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, + LabelKeyKubernetesComponent: "runner-scale-set-listener", + LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], + labelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), + } + + if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil { + return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err) } autoscalingListener := &v1alpha1.AutoscalingListener{ ObjectMeta: metav1.ObjectMeta{ Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: namespace, - Labels: map[string]string{ - LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, - LabelKeyGitHubScaleSetName: 
autoscalingRunnerSet.Name, - LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, - LabelKeyKubernetesComponent: "runner-scale-set-listener", - LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], - LabelKeyGitHubEnterprise: githubConfig.Enterprise, - LabelKeyGitHubOrganization: githubConfig.Organization, - LabelKeyGitHubRepository: githubConfig.Repository, - labelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), - }, + Labels: labels, }, Spec: v1alpha1.AutoscalingListenerSpec{ GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl, @@ -323,7 +321,7 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A } runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() - newLabels := map[string]string{ + labels := map[string]string{ labelKeyRunnerSpecHash: runnerSpecHash, LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, LabelKeyKubernetesComponent: "runner-set", @@ -332,7 +330,7 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, } - if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil { + if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil { return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err) } @@ -345,7 +343,7 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A ObjectMeta: metav1.ObjectMeta{ GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-", Namespace: autoscalingRunnerSet.ObjectMeta.Namespace, - Labels: newLabels, + Labels: labels, Annotations: newAnnotations, }, Spec: v1alpha1.EphemeralRunnerSetSpec{ @@ -545,14 +543,23 @@ func applyGitHubURLLabels(url string, labels map[string]string) error { } if len(githubConfig.Enterprise) > 0 { - labels[LabelKeyGitHubEnterprise] = githubConfig.Enterprise + labels[LabelKeyGitHubEnterprise] = 
trimLabelValue(githubConfig.Enterprise) } if len(githubConfig.Organization) > 0 { - labels[LabelKeyGitHubOrganization] = githubConfig.Organization + labels[LabelKeyGitHubOrganization] = trimLabelValue(githubConfig.Organization) } if len(githubConfig.Repository) > 0 { - labels[LabelKeyGitHubRepository] = githubConfig.Repository + labels[LabelKeyGitHubRepository] = trimLabelValue(githubConfig.Repository) } return nil } + +const trimLabelVauleSuffix = "-trim" + +func trimLabelValue(val string) string { + if len(val) > 63 { + return val[:63-len(trimLabelVauleSuffix)] + trimLabelVauleSuffix + } + return val +} diff --git a/controllers/actions.github.com/resourcebuilder_test.go b/controllers/actions.github.com/resourcebuilder_test.go index 925ba5cf43..1a1ae44e76 100644 --- a/controllers/actions.github.com/resourcebuilder_test.go +++ b/controllers/actions.github.com/resourcebuilder_test.go @@ -2,6 +2,8 @@ package actionsgithubcom import ( "context" + "fmt" + "strings" "testing" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" @@ -91,3 +93,70 @@ func TestLabelPropagation(t *testing.T) { assert.Equal(t, ephemeralRunner.Labels[key], pod.Labels[key]) } } + +func TestGitHubURLTrimLabelValues(t *testing.T) { + enterprise := strings.Repeat("a", 64) + organization := strings.Repeat("b", 64) + repository := strings.Repeat("c", 64) + + autoscalingRunnerSet := v1alpha1.AutoscalingRunnerSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-scale-set", + Namespace: "test-ns", + Labels: map[string]string{ + LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, + LabelKeyKubernetesVersion: "0.2.0", + }, + Annotations: map[string]string{ + runnerScaleSetIdAnnotationKey: "1", + AnnotationKeyGitHubRunnerGroupName: "test-group", + }, + }, + } + + t.Run("org/repo", func(t *testing.T) { + autoscalingRunnerSet := autoscalingRunnerSet.DeepCopy() + autoscalingRunnerSet.Spec = v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: 
fmt.Sprintf("https://github.com/%s/%s", organization, repository), + } + + var b resourceBuilder + ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet) + require.NoError(t, err) + assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 0) + assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], 63) + assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], 63) + assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], trimLabelVauleSuffix)) + assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], trimLabelVauleSuffix)) + + listener, err := b.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil) + require.NoError(t, err) + assert.Len(t, listener.Labels[LabelKeyGitHubEnterprise], 0) + assert.Len(t, listener.Labels[LabelKeyGitHubOrganization], 63) + assert.Len(t, listener.Labels[LabelKeyGitHubRepository], 63) + assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], trimLabelVauleSuffix)) + assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], trimLabelVauleSuffix)) + }) + + t.Run("enterprise", func(t *testing.T) { + autoscalingRunnerSet := autoscalingRunnerSet.DeepCopy() + autoscalingRunnerSet.Spec = v1alpha1.AutoscalingRunnerSetSpec{ + GitHubConfigUrl: fmt.Sprintf("https://github.com/enterprises/%s", enterprise), + } + + var b resourceBuilder + ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet) + require.NoError(t, err) + assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 63) + assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], trimLabelVauleSuffix)) + assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], 0) + assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], 0) + + listener, err := 
b.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil) + require.NoError(t, err) + assert.Len(t, listener.Labels[LabelKeyGitHubEnterprise], 63) + assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], trimLabelVauleSuffix)) + assert.Len(t, listener.Labels[LabelKeyGitHubOrganization], 0) + assert.Len(t, listener.Labels[LabelKeyGitHubRepository], 0) + }) +} From 500ea8e9d23b3a6493f67b8320b53a4e4b6b8b99 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 15 Jun 2023 06:07:09 -0400 Subject: [PATCH 227/561] Updates: runner to v2.305.0 (#2674) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- Makefile | 2 +- runner/Makefile | 2 +- runner/VERSION | 2 +- test/e2e/e2e_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 6f0cdabe21..8c75515fdc 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ else endif DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1) VERSION ?= dev -RUNNER_VERSION ?= 2.304.0 +RUNNER_VERSION ?= 2.305.0 TARGETPLATFORM ?= $(shell arch) RUNNER_NAME ?= ${DOCKER_USER}/actions-runner RUNNER_TAG ?= ${VERSION} diff --git a/runner/Makefile b/runner/Makefile index e57da33fa6..69bcb8b7d7 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) -RUNNER_VERSION ?= 2.304.0 +RUNNER_VERSION ?= 2.305.0 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.3.2 DOCKER_VERSION ?= 20.10.23 diff --git a/runner/VERSION b/runner/VERSION index 97c625c545..7d0c4ae36c 100644 --- a/runner/VERSION +++ b/runner/VERSION @@ -1,2 +1,2 @@ -RUNNER_VERSION=2.304.0 +RUNNER_VERSION=2.305.0 RUNNER_CONTAINER_HOOKS_VERSION=0.3.2 \ No newline at end of file diff --git a/test/e2e/e2e_test.go 
b/test/e2e/e2e_test.go index 87df15124d..bd906807c7 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -36,7 +36,7 @@ var ( testResultCMNamePrefix = "test-result-" - RunnerVersion = "2.304.0" + RunnerVersion = "2.305.0" ) // If you're willing to run this test via VS Code "run test" or "debug test", From 69b76043685ca612756fc7b0206a248f8e06c271 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Thu, 15 Jun 2023 13:38:55 +0200 Subject: [PATCH 228/561] Upgrade golang.org/x/net to 0.11 (#2676) --- go.mod | 10 +++++----- go.sum | 10 ++++++++++ 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 8d00b1e8a1..401aac7e4c 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/teambition/rrule-go v1.8.2 go.uber.org/multierr v1.7.0 go.uber.org/zap v1.24.0 - golang.org/x/net v0.9.0 + golang.org/x/net v0.11.0 golang.org/x/oauth2 v0.7.0 golang.org/x/sync v0.1.0 gomodules.xyz/jsonpatch/v2 v2.2.0 @@ -90,10 +90,10 @@ require ( github.com/stretchr/objx v0.5.0 // indirect github.com/urfave/cli v1.22.2 // indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/crypto v0.7.0 // indirect - golang.org/x/sys v0.7.0 // indirect - golang.org/x/term v0.7.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/crypto v0.10.0 // indirect + golang.org/x/sys v0.9.0 // indirect + golang.org/x/term v0.9.0 // indirect + golang.org/x/text v0.10.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.6.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index cdab23f83f..ee47a30ad3 100644 --- a/go.sum +++ b/go.sum @@ -395,6 +395,8 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= golang.org/x/crypto v0.7.0/go.mod 
h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -462,6 +464,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -533,10 +537,14 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -546,6 +554,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From cd49d0d6afcf7e33cce4a5a38f9f5391e34386c6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Jun 2023 14:05:35 +0200 Subject: [PATCH 229/561] chore(deps): bump go.uber.org/multierr from 1.7.0 to 1.10.0 (#2400) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Nikola Jokic --- go.mod | 2 +- go.sum | 4 ++-- 2 files 
changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 401aac7e4c..1b3a3921ab 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/prometheus/client_golang v1.14.0 github.com/stretchr/testify v1.8.2 github.com/teambition/rrule-go v1.8.2 - go.uber.org/multierr v1.7.0 + go.uber.org/multierr v1.10.0 go.uber.org/zap v1.24.0 golang.org/x/net v0.11.0 golang.org/x/oauth2 v0.7.0 diff --git a/go.sum b/go.sum index ee47a30ad3..5c9be20769 100644 --- a/go.sum +++ b/go.sum @@ -380,8 +380,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= From 6dcd84f9931b4c5da7f5f3f59b14b0829a5930d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Jun 2023 14:56:53 +0200 Subject: [PATCH 230/561] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.9.0 to 2.9.1 (#2401) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Nikola Jokic --- go.mod | 8 ++++---- go.sum | 15 ++++++++------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 1b3a3921ab..90e56518d3 100644 --- 
a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( github.com/hashicorp/go-retryablehttp v0.7.2 github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/ginkgo/v2 v2.9.0 - github.com/onsi/gomega v1.27.2 + github.com/onsi/ginkgo/v2 v2.9.1 + github.com/onsi/gomega v1.27.3 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/stretchr/testify v1.8.2 @@ -58,7 +58,7 @@ require ( github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-github/v45 v45.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect @@ -95,7 +95,7 @@ require ( golang.org/x/term v0.9.0 // indirect golang.org/x/text v0.10.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.6.0 // indirect + golang.org/x/tools v0.7.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 5c9be20769..a10169d661 100644 --- a/go.sum +++ b/go.sum @@ -162,8 +162,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= @@ -296,12 +297,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.9.0 h1:Tugw2BKlNHTMfG+CheOITkYvk4LAh6MFOvikhGVnhE8= -github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk= +github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= +github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.27.2 h1:SKU0CXeKE/WVgIV1T61kSa3+IRE8Ekrv9rdXDwwTqnY= -github.com/onsi/gomega v1.27.2/go.mod h1:5mR3phAHpkAVIDkHEUBY6HGVsU+cpcEscrGPB4oPlZI= +github.com/onsi/gomega v1.27.3 h1:5VwIwnBY3vbBDOJrNtA4rVdiTZCsq9B5F12pvy1Drmk= +github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -605,8 +606,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= 
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From c1ce2e608ce42b4acffff256989513e69b83f496 Mon Sep 17 00:00:00 2001 From: Bassem Dghaidi <568794+Link-@users.noreply.github.com> Date: Fri, 16 Jun 2023 15:11:58 +0200 Subject: [PATCH 231/561] Apply the label "runners update" on runner update PRs (#2680) --- .github/workflows/arc-update-runners-scheduled.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/arc-update-runners-scheduled.yaml b/.github/workflows/arc-update-runners-scheduled.yaml index 1fb1153b4e..904ef77df1 100644 --- a/.github/workflows/arc-update-runners-scheduled.yaml +++ b/.github/workflows/arc-update-runners-scheduled.yaml @@ -146,4 +146,4 @@ jobs: git push -u origin HEAD - name: Create pull request - run: gh pr create -f + run: gh pr create -f -l "runners update" From 6c5f2449d8ff658accb827196e1db5f58ef1aefd Mon Sep 17 00:00:00 2001 From: Timm Drevensek Date: Tue, 20 Jun 2023 17:35:53 +0200 Subject: [PATCH 232/561] Adapt role name to prevent namespace collision (#2617) --- .../templates/_helpers.tpl | 8 ++++++++ .../templates/manager_single_namespace_watch_role.yaml | 2 +- 
.../manager_single_namespace_watch_role_binding.yaml | 6 +++--- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl index 468ddf6d31..1500b460b2 100644 --- a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl @@ -88,6 +88,14 @@ Create the name of the service account to use {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-rolebinding {{- end }} +{{- define "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-watch-role +{{- end }} + +{{- define "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleBinding" -}} +{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-watch-rolebinding +{{- end }} + {{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}} {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-role {{- end }} diff --git a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml index f0f653d766..ac5a2d93a7 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }} + name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" . 
}} namespace: {{ .Values.flags.watchSingleNamespace }} rules: - apiGroups: diff --git a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role_binding.yaml b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role_binding.yaml index 3edd0c61ec..679233581b 100644 --- a/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role_binding.yaml +++ b/charts/gha-runner-scale-set-controller/templates/manager_single_namespace_watch_role_binding.yaml @@ -2,14 +2,14 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }} + name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleBinding" . }} namespace: {{ .Values.flags.watchSingleNamespace }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }} + name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" . }} subjects: - kind: ServiceAccount name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} -{{- end }} \ No newline at end of file +{{- end }} From 74b622ae05e49f64efc167b8ea477bdb3526fcbf Mon Sep 17 00:00:00 2001 From: kahirokunn Date: Wed, 21 Jun 2023 20:50:02 +0900 Subject: [PATCH 233/561] chore(gha-runner-scale-set): update indentation of initContainers (#2638) --- .../templates/autoscalingrunnerset.yaml | 2 +- .../tests/template_test.go | 44 +++++++++++++++++++ .../values_dind_extra_init_containers.yaml | 17 +++++++ 3 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 charts/gha-runner-scale-set/tests/values_dind_extra_init_containers.yaml diff --git a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml index 9e52e24952..a81f5647bf 100644 --- a/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml +++ b/charts/gha-runner-scale-set/templates/autoscalingrunnerset.yaml @@ -119,7 +119,7 @@ spec: {{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }} {{- end }} {{- with .Values.template.spec.initContainers }} - {{- toYaml . | nindent 8 }} + {{- toYaml . 
| nindent 6 }} {{- end }} {{- end }} containers: diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index d1d1c7776b..e3369dfa82 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -694,6 +694,50 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) { assert.Equal(t, "/data", ars.Spec.Template.Spec.Volumes[2].HostPath.Path, "Volume host path should be /data") } +func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testing.T) { + t.Parallel() + + // Path to the helm chart we will test + helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") + require.NoError(t, err) + + testValuesPath, err := filepath.Abs("../tests/values_dind_extra_init_containers.yaml") + require.NoError(t, err) + + releaseName := "test-runners" + namespaceName := "test-" + strings.ToLower(random.UniqueId()) + + options := &helm.Options{ + Logger: logger.Discard, + SetValues: map[string]string{ + "controllerServiceAccount.name": "arc", + "controllerServiceAccount.namespace": "arc-system", + }, + ValuesFiles: []string{testValuesPath}, + KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), + } + + output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) + + var ars v1alpha1.AutoscalingRunnerSet + helm.UnmarshalK8SYaml(t, output, &ars) + + assert.Len(t, ars.Spec.Template.Spec.InitContainers, 3, "InitContainers should be 3") + assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[1].Name, "InitContainers[1] Name should be kube-init") + assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[1].Image, "InitContainers[1] Image should be runner-image:latest") + assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[1].Command[0], "InitContainers[1] Command[0] should be sudo") + assert.Equal(t, "chown", 
ars.Spec.Template.Spec.InitContainers[1].Command[1], "InitContainers[1] Command[1] should be chown") + assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[1].Command[2], "InitContainers[1] Command[2] should be -R") + assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[1].Command[3], "InitContainers[1] Command[3] should be 1001:123") + assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].Command[4], "InitContainers[1] Command[4] should be /home/runner/_work") + assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name, "InitContainers[1] VolumeMounts[0] Name should be work") + assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath, "InitContainers[1] VolumeMounts[0] MountPath should be /home/runner/_work") + + assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[2] Name should be ls") + assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[2] Image should be ubuntu:latest") + assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls") +} + func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) { t.Parallel() diff --git a/charts/gha-runner-scale-set/tests/values_dind_extra_init_containers.yaml b/charts/gha-runner-scale-set/tests/values_dind_extra_init_containers.yaml new file mode 100644 index 0000000000..c556e495ba --- /dev/null +++ b/charts/gha-runner-scale-set/tests/values_dind_extra_init_containers.yaml @@ -0,0 +1,17 @@ +githubConfigUrl: https://github.com/actions/actions-runner-controller +githubConfigSecret: + github_token: test +template: + spec: + initContainers: + - name: kube-init + image: runner-image:latest + command: ["sudo", "chown", "-R", "1001:123", "/home/runner/_work"] + volumeMounts: + - name: work + mountPath: /home/runner/_work + - name: ls + image: ubuntu:latest + 
command: ["ls"] +containerMode: + type: dind From 141846717941d10b98fd7205cf36c93c3123056b Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Wed, 21 Jun 2023 14:43:03 +0200 Subject: [PATCH 234/561] fix chart test (#2694) --- .../gha-runner-scale-set-controller/tests/template_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index e972bd07ab..73c61699d1 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -868,7 +868,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) { var managerSingleNamespaceWatchRole rbacv1.Role helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRole) - assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-watch-role", managerSingleNamespaceWatchRole.Name) assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace) assert.Equal(t, 14, len(managerSingleNamespaceWatchRole.Rules)) } @@ -907,9 +907,9 @@ func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) { var managerSingleNamespaceWatchRoleBinding rbacv1.RoleBinding helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRoleBinding) - assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceWatchRoleBinding.Name) + assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-watch-rolebinding", managerSingleNamespaceWatchRoleBinding.Name) assert.Equal(t, "demo", managerSingleNamespaceWatchRoleBinding.Namespace) - assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRoleBinding.RoleRef.Name) + 
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-watch-role", managerSingleNamespaceWatchRoleBinding.RoleRef.Name) assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name) assert.Equal(t, namespaceName, managerSingleNamespaceWatchRoleBinding.Subjects[0].Namespace) } From c74a2c9f50414bf6a80c0c92a2c4e1cb249ae42a Mon Sep 17 00:00:00 2001 From: Rose Soriano <80055707+rosesori@users.noreply.github.com> Date: Fri, 23 Jun 2023 05:54:13 -0700 Subject: [PATCH 235/561] Fix more broken links in docs (#2473) Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com> --- docs/authenticating-to-the-github-api.md | 4 ++-- docs/automatically-scaling-runners.md | 2 +- docs/using-arc-across-organizations.md | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/authenticating-to-the-github-api.md b/docs/authenticating-to-the-github-api.md index 80dcd301b0..61370335ee 100644 --- a/docs/authenticating-to-the-github-api.md +++ b/docs/authenticating-to-the-github-api.md @@ -22,7 +22,7 @@ _Note: Links are provided further down to create an app for your logged in user * Actions (read) * Administration (read / write) -* Checks (read) (if you are going to use [Webhook Driven Scaling](#webhook-driven-scaling)) +* Checks (read) (if you are going to use [Webhook Driven Scaling](automatically-scaling-runners.md#webhook-driven-scaling)) * Metadata (read) **Required Permissions for Organization Runners:**
@@ -39,7 +39,7 @@ _Note: All API routes mapped to their permissions can be found [here](https://do **Subscribe to events** -At this point you have a choice of configuring a webhook, a webhook is needed if you are going to use [webhook driven scaling](#webhook-driven-scaling). The webhook can be configured centrally in the GitHub app itself or separately. In either case you need to subscribe to the `Workflow Job` event. +At this point you have a choice of configuring a webhook, a webhook is needed if you are going to use [webhook driven scaling](automatically-scaling-runners.md#webhook-driven-scaling). The webhook can be configured centrally in the GitHub app itself or separately. In either case you need to subscribe to the `Workflow Job` event. --- diff --git a/docs/automatically-scaling-runners.md b/docs/automatically-scaling-runners.md index 1049e87a30..4d7dc8fd7d 100644 --- a/docs/automatically-scaling-runners.md +++ b/docs/automatically-scaling-runners.md @@ -133,7 +133,7 @@ The `HorizontalRunnerAutoscaler` will poll GitHub for the number of runners in t **Benefits of this metric** 1. Supports named repositories server-side the same as the `TotalNumberOfQueuedAndInProgressWorkflowRuns` metric [#313](https://github.com/actions/actions-runner-controller/pull/313) 2. Supports GitHub organization wide scaling without maintaining an explicit list of repositories, this is especially useful for those that are working at a larger scale. [#223](https://github.com/actions/actions-runner-controller/pull/223) -3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [GitHub labels](#runner-labels) +3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [GitHub labels](using-arc-runners-in-a-workflow.md#runner-labels) 4. 
Supports scaling desired runner count on both a percentage increase / decrease basis as well as on a fixed increase / decrease count basis [#223](https://github.com/actions/actions-runner-controller/pull/223) [#315](https://github.com/actions/actions-runner-controller/pull/315) **Drawbacks of this metric** diff --git a/docs/using-arc-across-organizations.md b/docs/using-arc-across-organizations.md index 8b02433123..4a67697910 100644 --- a/docs/using-arc-across-organizations.md +++ b/docs/using-arc-across-organizations.md @@ -4,7 +4,7 @@ > This feature requires controller version => [v0.26.0](https://github.com/actions/actions-runner-controller/releases/tag/v0.26.0) -In a large enterprise, there might be many GitHub organizations that requires self-hosted runners. Previously, the only way to provide ARC-managed self-hosted runners in such environment was [Deploying Multiple Controllers](#deploying-multiple-controllers), which incurs overhead due to it requires one ARC installation per GitHub organization. +In a large enterprise, there might be many GitHub organizations that requires self-hosted runners. Previously, the only way to provide ARC-managed self-hosted runners in such environment was [Deploying Multiple Controllers](deploying-arc-runners.md#deploying-multiple-controllers), which incurs overhead due to it requires one ARC installation per GitHub organization. With multitenancy, you can let ARC manage self-hosted runners across organizations. It's enabled by default and the only thing you need to start using it is to set the `spec.githubAPICredentialsFrom.secretRef.name` fields for the following resources: @@ -58,4 +58,4 @@ spec: when and which varying ARC component(`horizontalrunnerautoscaler-controller`, `runnerdeployment-controller`, `runnerreplicaset-controller`, `runner-controller` or `runnerpod-controller`) makes specific API calls. > Just don't be surprised you have to repeat `githubAPICredentialsFrom.secretRef.name` settings among two resources! 
-Please refer to [Deploying Using GitHub App Authentication](#deploying-using-github-app-authentication) for how you could create the Kubernetes secret containing GitHub App credentials. \ No newline at end of file +Please refer to [Deploying Using GitHub App Authentication](authenticating-to-the-github-api.md#deploying-using-github-app-authentication) for how you could create the Kubernetes secret containing GitHub App credentials. \ No newline at end of file From b91a5fcd7efd49b2b05a8233e1a4ba0b8cf5680a Mon Sep 17 00:00:00 2001 From: kahirokunn Date: Tue, 27 Jun 2023 19:32:47 +0900 Subject: [PATCH 236/561] chore: remove 16 characters from -service-account (#2567) Signed-off-by: kahirokunn Co-authored-by: Nikola Jokic --- charts/gha-runner-scale-set/templates/_helpers.tpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/gha-runner-scale-set/templates/_helpers.tpl b/charts/gha-runner-scale-set/templates/_helpers.tpl index 202bb04d58..aa75d0701e 100644 --- a/charts/gha-runner-scale-set/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set/templates/_helpers.tpl @@ -58,7 +58,7 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{- define "gha-runner-scale-set.noPermissionServiceAccountName" -}} -{{- include "gha-runner-scale-set.fullname" . }}-no-permission-service-account +{{- include "gha-runner-scale-set.fullname" . }}-no-permission {{- end }} {{- define "gha-runner-scale-set.kubeModeRoleName" -}} @@ -70,7 +70,7 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}} -{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-service-account +{{- include "gha-runner-scale-set.fullname" . 
}}-kube-mode {{- end }} {{- define "gha-runner-scale-set.dind-init-container" -}} From 8ccb550a27a0ae830ebecf768c338d0f2d5b96e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:14:31 +0200 Subject: [PATCH 237/561] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.9.1 to 2.11.0 (#2689) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Nikola Jokic --- go.mod | 12 ++++++------ go.sum | 34 +++++++++++++--------------------- 2 files changed, 19 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index 90e56518d3..ff30eb96a1 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 github.com/davecgh/go-spew v1.1.1 github.com/evanphx/json-patch v4.12.0+incompatible - github.com/go-logr/logr v1.2.3 + github.com/go-logr/logr v1.2.4 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/google/go-cmp v0.5.9 github.com/google/go-github/v52 v52.0.0 @@ -17,8 +17,8 @@ require ( github.com/hashicorp/go-retryablehttp v0.7.2 github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/ginkgo/v2 v2.9.1 - github.com/onsi/gomega v1.27.3 + github.com/onsi/ginkgo/v2 v2.11.0 + github.com/onsi/gomega v1.27.8 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/stretchr/testify v1.8.2 @@ -27,7 +27,7 @@ require ( go.uber.org/zap v1.24.0 golang.org/x/net v0.11.0 golang.org/x/oauth2 v0.7.0 - golang.org/x/sync v0.1.0 + golang.org/x/sync v0.2.0 gomodules.xyz/jsonpatch/v2 v2.2.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.26.2 @@ -55,7 +55,7 @@ require ( github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.19.14 // indirect github.com/go-sql-driver/mysql v1.4.1 // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/go-task/slim-sprig 
v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -95,7 +95,7 @@ require ( golang.org/x/term v0.9.0 // indirect golang.org/x/text v0.10.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/tools v0.9.3 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index a10169d661..210413c129 100644 --- a/go.sum +++ b/go.sum @@ -111,8 +111,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -126,8 +126,9 @@ github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/ github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -297,12 +298,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= -github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.27.3 h1:5VwIwnBY3vbBDOJrNtA4rVdiTZCsq9B5F12pvy1Drmk= -github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= +github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= +github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -394,8 +395,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -428,6 +427,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -463,8 +463,6 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -486,8 +484,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -536,14 +534,10 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -553,8 +547,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -606,8 +598,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc 
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= +golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From cc27913dc22152a918a48c0987d386a8d7ab55e2 Mon Sep 17 00:00:00 2001 From: Lars Lange <9141483+Langleu@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:24:49 +0200 Subject: [PATCH 238/561] fix: template test of service account (#2705) --- charts/gha-runner-scale-set/tests/template_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/gha-runner-scale-set/tests/template_test.go b/charts/gha-runner-scale-set/tests/template_test.go index e3369dfa82..41971cd0a2 100644 --- a/charts/gha-runner-scale-set/tests/template_test.go +++ b/charts/gha-runner-scale-set/tests/template_test.go @@ -190,13 +190,13 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &serviceAccount) assert.Equal(t, namespaceName, serviceAccount.Namespace) - assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", serviceAccount.Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission", serviceAccount.Name) 
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) var ars v1alpha1.AutoscalingRunnerSet helm.UnmarshalK8SYaml(t, output, &ars) - assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName) + assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission", ars.Spec.Template.Spec.ServiceAccountName) assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) // no finalizer protections in place } @@ -227,7 +227,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { helm.UnmarshalK8SYaml(t, output, &serviceAccount) assert.Equal(t, namespaceName, serviceAccount.Namespace) - assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode", serviceAccount.Name) assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0]) output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"}) @@ -253,7 +253,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { assert.Equal(t, namespaceName, roleBinding.Namespace) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role-binding", roleBinding.Name) assert.Len(t, roleBinding.Subjects, 1) - assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name) + assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode", roleBinding.Subjects[0].Name) assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace) assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name) assert.Equal(t, "Role", roleBinding.RoleRef.Kind) @@ -263,7 +263,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { var ars v1alpha1.AutoscalingRunnerSet helm.UnmarshalK8SYaml(t, 
output, &ars) - expectedServiceAccountName := "test-runners-gha-runner-scale-set-kube-mode-service-account" + expectedServiceAccountName := "test-runners-gha-runner-scale-set-kube-mode" assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName) assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) } @@ -1890,7 +1890,7 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-runner-scale-set-github-secret", actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-runner-scale-set-manager-role", actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-runner-scale-set-manager-role-binding", - actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-runner-scale-set-kube-mode-service-account", + actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-runner-scale-set-kube-mode", actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-runner-scale-set-kube-mode-role", actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-runner-scale-set-kube-mode-role-binding", } From 063d21ca834976ab141be3290bedcc394c80d8c5 Mon Sep 17 00:00:00 2001 From: marcin-motyl <118439707+marcin-motyl@users.noreply.github.com> Date: Sat, 1 Jul 2023 06:59:44 +0200 Subject: [PATCH 239/561] Fix serviceMonitor labels in actionsMetrics (#2682) --- .../templates/actionsmetrics.servicemonitor.yaml.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/actions-runner-controller/templates/actionsmetrics.servicemonitor.yaml.yml b/charts/actions-runner-controller/templates/actionsmetrics.servicemonitor.yaml.yml index 25e72f1324..d25400fcaa 100644 --- a/charts/actions-runner-controller/templates/actionsmetrics.servicemonitor.yaml.yml +++ 
b/charts/actions-runner-controller/templates/actionsmetrics.servicemonitor.yaml.yml @@ -4,7 +4,7 @@ kind: ServiceMonitor metadata: labels: {{- include "actions-runner-controller.labels" . | nindent 4 }} - {{- with .Values.actionsMetricsServer.serviceMonitorLabels }} + {{- with .Values.actionsMetrics.serviceMonitorLabels }} {{- toYaml . | nindent 4 }} {{- end }} name: {{ include "actions-runner-controller-actions-metrics-server.serviceMonitorName" . }} From 7621752bfc4e60ccef508abe4c14350d5d3188f3 Mon Sep 17 00:00:00 2001 From: Yusuke Kuoka Date: Mon, 3 Jul 2023 18:52:04 +0900 Subject: [PATCH 240/561] chore: Set build version on make-runscaleset (#2713) --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8c75515fdc..eb118baf21 100644 --- a/Makefile +++ b/Makefile @@ -95,7 +95,8 @@ run: generate fmt vet manifests run-scaleset: generate fmt vet CONTROLLER_MANAGER_POD_NAMESPACE=default \ CONTROLLER_MANAGER_CONTAINER_IMAGE="${DOCKER_IMAGE_NAME}:${VERSION}" \ - go run ./main.go --auto-scaling-runner-set-only + go run -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=$(VERSION)'" \ + ./main.go --auto-scaling-runner-set-only # Install CRDs into a cluster install: manifests From baab5ccbdafb6d729ffc36f781f60a17ffee3a07 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Wed, 5 Jul 2023 21:06:42 +0200 Subject: [PATCH 241/561] Add configurable log format to values.yaml and propagate it to listener (#2686) --- .../templates/deployment.yaml | 3 + .../tests/template_test.go | 72 ++++++++++++------- .../values.yaml | 15 ++-- cmd/githubrunnerscalesetlistener/main.go | 24 +++++-- controllers/actions.github.com/constants.go | 11 ++- .../actions.github.com/resourcebuilder.go | 30 ++++++++ main.go | 8 ++- 7 files changed, 124 insertions(+), 39 deletions(-) diff --git a/charts/gha-runner-scale-set-controller/templates/deployment.yaml b/charts/gha-runner-scale-set-controller/templates/deployment.yaml index 
dc0b88a7eb..41034df8de 100644 --- a/charts/gha-runner-scale-set-controller/templates/deployment.yaml +++ b/charts/gha-runner-scale-set-controller/templates/deployment.yaml @@ -56,6 +56,9 @@ spec: {{- with .Values.flags.logLevel }} - "--log-level={{ . }}" {{- end }} + {{- with .Values.flags.logFormat }} + - "--log-format={{ . }}" + {{- end }} {{- with .Values.flags.watchSingleNamespace }} - "--watch-single-namespace={{ . }}" {{- end }} diff --git a/charts/gha-runner-scale-set-controller/tests/template_test.go b/charts/gha-runner-scale-set-controller/tests/template_test.go index 73c61699d1..f7107fbb91 100644 --- a/charts/gha-runner-scale-set-controller/tests/template_test.go +++ b/charts/gha-runner-scale-set-controller/tests/template_test.go @@ -244,6 +244,7 @@ func TestTemplate_CreateManagerListenerRole(t *testing.T) { assert.Equal(t, namespaceName, managerListenerRole.Namespace, "Role should have a namespace") assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRole.Name) + assert.Equal(t, 4, len(managerListenerRole.Rules)) assert.Equal(t, "pods", managerListenerRole.Rules[0].Resources[0]) assert.Equal(t, "pods/status", managerListenerRole.Rules[1].Resources[0]) @@ -356,10 +357,13 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3) - assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) - assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1]) - assert.Equal(t, "--update-strategy=immediate", deployment.Spec.Template.Spec.Containers[0].Args[2]) + expectedArgs := []string{ + "--auto-scaling-runner-set-only", + "--log-level=debug", + "--log-format=text", + "--update-strategy=immediate", + } + 
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) @@ -420,6 +424,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { "affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar", "priorityClassName": "test-priority-class", "flags.updateStrategy": "eventual", + "flags.logLevel": "info", + "flags.logFormat": "json", }, KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), } @@ -484,11 +490,15 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4) - assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) - assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1]) - assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) - assert.Equal(t, "--update-strategy=eventual", deployment.Spec.Template.Spec.Containers[0].Args[3]) + expectArgs := []string{ + "--auto-scaling-runner-set-only", + "--auto-scaler-image-pull-secrets=dockerhub", + "--log-level=info", + "--log-format=json", + "--update-strategy=eventual", + } + + assert.ElementsMatch(t, expectArgs, deployment.Spec.Template.Spec.Containers[0].Args) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) @@ -605,12 +615,16 @@ func TestTemplate_EnableLeaderElection(t *testing.T) { assert.Len(t, 
deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 5) - assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) - assert.Equal(t, "--enable-leader-election", deployment.Spec.Template.Spec.Containers[0].Args[1]) - assert.Equal(t, "--leader-election-id=test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.Containers[0].Args[2]) - assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[3]) - assert.Equal(t, "--update-strategy=immediate", deployment.Spec.Template.Spec.Containers[0].Args[4]) + expectedArgs := []string{ + "--auto-scaling-runner-set-only", + "--enable-leader-election", + "--leader-election-id=test-arc-gha-runner-scale-set-controller", + "--log-level=debug", + "--log-format=text", + "--update-strategy=immediate", + } + + assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args) } func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) { @@ -639,11 +653,15 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) { assert.Equal(t, namespaceName, deployment.Namespace) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4) - assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) - assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1]) - assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2]) - assert.Equal(t, "--update-strategy=immediate", deployment.Spec.Template.Spec.Containers[0].Args[3]) + expectedArgs := []string{ + "--auto-scaling-runner-set-only", + "--auto-scaler-image-pull-secrets=dockerhub,ghcr", + "--log-level=debug", + "--log-format=text", + "--update-strategy=immediate", 
+ } + + assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args) } func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) { @@ -721,11 +739,15 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) { assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1) assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0]) - assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4) - assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0]) - assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1]) - assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2]) - assert.Equal(t, "--update-strategy=immediate", deployment.Spec.Template.Spec.Containers[0].Args[3]) + expectedArgs := []string{ + "--auto-scaling-runner-set-only", + "--log-level=debug", + "--log-format=text", + "--watch-single-namespace=demo", + "--update-strategy=immediate", + } + + assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args) assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3) assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name) diff --git a/charts/gha-runner-scale-set-controller/values.yaml b/charts/gha-runner-scale-set-controller/values.yaml index b0405609cc..1da0aac631 100644 --- a/charts/gha-runner-scale-set-controller/values.yaml +++ b/charts/gha-runner-scale-set-controller/values.yaml @@ -79,6 +79,9 @@ flags: ## Log level can be set here with one of the following values: "debug", "info", "warn", "error". ## Defaults to "debug". logLevel: "debug" + ## Log format can be set with one of the following values: "text", "json" + ## Defaults to "text" + logFormat: "text" ## Restricts the controller to only watch resources in the desired namespace. 
## Defaults to watch all namespaces when unset. @@ -88,14 +91,14 @@ flags: ## ## The srategies available are: ## - "immediate": (default) The controller will immediately apply the change causing the - ## recreation of the listener and ephemeral runner set. This can lead to an + ## recreation of the listener and ephemeral runner set. This can lead to an ## overprovisioning of runners, if there are pending / running jobs. This should not - ## be a problem at a small scale, but it could lead to a significant increase of + ## be a problem at a small scale, but it could lead to a significant increase of ## resources if you have a lot of jobs running concurrently. - ## - ## - "eventual": The controller will remove the listener and ephemeral runner set - ## immediately, but will not recreate them (to apply changes) until all + ## + ## - "eventual": The controller will remove the listener and ephemeral runner set + ## immediately, but will not recreate them (to apply changes) until all ## pending / running jobs have completed. ## This can lead to a longer time to apply the change but it will ensure ## that you don't have any overprovisioning of runners. 
- updateStrategy: "immediate" \ No newline at end of file + updateStrategy: "immediate" diff --git a/cmd/githubrunnerscalesetlistener/main.go b/cmd/githubrunnerscalesetlistener/main.go index 64abf6cfad..deff9a6812 100644 --- a/cmd/githubrunnerscalesetlistener/main.go +++ b/cmd/githubrunnerscalesetlistener/main.go @@ -46,18 +46,30 @@ type RunnerScaleSetListenerConfig struct { MinRunners int `split_words:"true"` RunnerScaleSetId int `split_words:"true"` ServerRootCA string `split_words:"true"` + LogLevel string `split_words:"true"` + LogFormat string `split_words:"true"` } func main() { - logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err) + var rc RunnerScaleSetListenerConfig + if err := envconfig.Process("github", &rc); err != nil { + fmt.Fprintf(os.Stderr, "Error: processing environment variables for RunnerScaleSetListenerConfig: %v\n", err) os.Exit(1) } - var rc RunnerScaleSetListenerConfig - if err := envconfig.Process("github", &rc); err != nil { - logger.Error(err, "Error: processing environment variables for RunnerScaleSetListenerConfig") + logLevel := string(logging.LogLevelDebug) + if rc.LogLevel != "" { + logLevel = rc.LogLevel + } + + logFormat := string(logging.LogFormatText) + if rc.LogFormat != "" { + logFormat = rc.LogFormat + } + + logger, err := logging.NewLogger(logLevel, logFormat) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err) os.Exit(1) } diff --git a/controllers/actions.github.com/constants.go b/controllers/actions.github.com/constants.go index 339c39d9e0..5e8445ce0e 100644 --- a/controllers/actions.github.com/constants.go +++ b/controllers/actions.github.com/constants.go @@ -1,6 +1,9 @@ package actionsgithubcom -import corev1 "k8s.io/api/core/v1" +import ( + "github.com/actions/actions-runner-controller/logging" + corev1 "k8s.io/api/core/v1" +) const ( LabelKeyRunnerTemplateHash = "runner-template-hash" @@ 
-60,5 +63,11 @@ const ( // to the listener when ImagePullPolicy is not specified const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent +// DefaultScaleSetListenerLogLevel is the default log level applied +const DefaultScaleSetListenerLogLevel = string(logging.LogLevelDebug) + +// DefaultScaleSetListenerLogFormat is the default log format applied +const DefaultScaleSetListenerLogFormat = string(logging.LogFormatText) + // ownerKey is field selector matching the owner name of a particular resource const resourceOwnerKey = ".metadata.controller" diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index 5be1163bb6..d9369bd53e 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -10,6 +10,7 @@ import ( "github.com/actions/actions-runner-controller/build" "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/hash" + "github.com/actions/actions-runner-controller/logging" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -46,6 +47,27 @@ func SetListenerImagePullPolicy(pullPolicy string) bool { } } +var scaleSetListenerLogLevel = DefaultScaleSetListenerLogLevel +var scaleSetListenerLogFormat = DefaultScaleSetListenerLogFormat + +func SetListenerLoggingParameters(level string, format string) bool { + switch level { + case logging.LogLevelDebug, logging.LogLevelInfo, logging.LogLevelWarn, logging.LogLevelError: + default: + return false + } + + switch format { + case logging.LogFormatJSON, logging.LogFormatText: + default: + return false + } + + scaleSetListenerLogLevel = level + scaleSetListenerLogFormat = format + return true +} + type resourceBuilder struct{} func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, 
image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) { @@ -128,6 +150,14 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A Name: "GITHUB_RUNNER_SCALE_SET_ID", Value: strconv.Itoa(autoscalingListener.Spec.RunnerScaleSetId), }, + { + Name: "GITHUB_RUNNER_LOG_LEVEL", + Value: scaleSetListenerLogLevel, + }, + { + Name: "GITHUB_RUNNER_LOG_FORMAT", + Value: scaleSetListenerLogFormat, + }, } listenerEnv = append(listenerEnv, envs...) diff --git a/main.go b/main.go index 9cc1bbdd46..689a71b3b8 100644 --- a/main.go +++ b/main.go @@ -182,12 +182,18 @@ func main() { } listenerPullPolicy := os.Getenv("CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY") - if ok := actionsgithubcom.SetListenerImagePullPolicy(listenerPullPolicy); ok { + if actionsgithubcom.SetListenerImagePullPolicy(listenerPullPolicy) { log.Info("AutoscalingListener image pull policy changed", "ImagePullPolicy", listenerPullPolicy) } else { log.Info("Using default AutoscalingListener image pull policy", "ImagePullPolicy", actionsgithubcom.DefaultScaleSetListenerImagePullPolicy) } + if actionsgithubcom.SetListenerLoggingParameters(logLevel, logFormat) { + log.Info("AutoscalingListener logging parameters changed", "LogLevel", logLevel, "LogFormat", logFormat) + } else { + log.Info("Using default AutoscalingListener logging parameters", "LogLevel", actionsgithubcom.DefaultScaleSetListenerLogLevel, "LogFormat", actionsgithubcom.DefaultScaleSetListenerLogFormat) + } + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, NewCache: newCache, From 4434be9744d5c3a9f76c2ed345203a29d8438b25 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Wed, 5 Jul 2023 21:09:07 +0200 Subject: [PATCH 242/561] Add status check before deserializing runner-registration response (#2699) --- github/actions/client.go | 10 +++++ github/actions/github_api_request_test.go | 22 +++++++++++ github/actions/testserver/server.go | 47 
+++++++++++++++++++---- 3 files changed, 71 insertions(+), 8 deletions(-) diff --git a/github/actions/client.go b/github/actions/client.go index 51fe75d8e3..bea3e9b289 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -921,6 +921,16 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis } defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode > 299 { + registrationErr := fmt.Errorf("unexpected response from Actions service during registration call: %v", resp.StatusCode) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%v - %v", registrationErr, err) + } + return nil, fmt.Errorf("%v - %v", registrationErr, string(body)) + } + var actionsServiceAdminConnection *ActionsServiceAdminConnection if err := json.NewDecoder(resp.Body).Decode(&actionsServiceAdminConnection); err != nil { return nil, err diff --git a/github/actions/github_api_request_test.go b/github/actions/github_api_request_test.go index fef7b58f41..da43a1b3bc 100644 --- a/github/actions/github_api_request_test.go +++ b/github/actions/github_api_request_test.go @@ -122,6 +122,28 @@ func TestNewActionsServiceRequest(t *testing.T) { assert.Equal(t, "Bearer "+newToken, req.Header.Get("Authorization")) }) + t.Run("admin token refresh failure", func(t *testing.T) { + newToken := defaultActionsToken(t) + errMessage := `{"message":"test"}` + unauthorizedHandler := func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(errMessage)) + } + server := testserver.New(t, nil, testserver.WithActionsToken("random-token"), testserver.WithActionsToken(newToken), testserver.WithActionsRegistrationTokenHandler(unauthorizedHandler)) + client, err := actions.NewClient(server.ConfigURLForOrg("my-org"), defaultCreds) + require.NoError(t, err) + expiringToken := "expiring-token" + expiresAt := time.Now().Add(59 * time.Second) + 
client.ActionsServiceAdminToken = expiringToken + client.ActionsServiceAdminTokenExpiresAt = expiresAt + _, err = client.NewActionsServiceRequest(ctx, http.MethodGet, "my-path", nil) + require.Error(t, err) + assert.Contains(t, err.Error(), errMessage) + assert.Equal(t, client.ActionsServiceAdminToken, expiringToken) + assert.Equal(t, client.ActionsServiceAdminTokenExpiresAt, expiresAt) + }) + t.Run("token is currently valid", func(t *testing.T) { tokenThatShouldNotBeFetched := defaultActionsToken(t) server := testserver.New(t, nil, testserver.WithActionsToken(tokenThatShouldNotBeFetched)) diff --git a/github/actions/testserver/server.go b/github/actions/testserver/server.go index 49ff7073fb..e5148e413f 100644 --- a/github/actions/testserver/server.go +++ b/github/actions/testserver/server.go @@ -35,6 +35,8 @@ func NewUnstarted(t ginkgo.GinkgoTInterface, handler http.Handler, options ...ac server.Close() }) + server.setDefaults(t) + for _, option := range options { option(server) } @@ -42,18 +44,13 @@ func NewUnstarted(t ginkgo.GinkgoTInterface, handler http.Handler, options ...ac h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // handle getRunnerRegistrationToken if strings.HasSuffix(r.URL.Path, "/runners/registration-token") { - w.WriteHeader(http.StatusCreated) - w.Write([]byte(`{"token":"token"}`)) + server.runnerRegistrationTokenHandler(w, r) return } // handle getActionsServiceAdminConnection if strings.HasSuffix(r.URL.Path, "/actions/runner-registration") { - if server.token == "" { - server.token = DefaultActionsToken(t) - } - - w.Write([]byte(`{"url":"` + s.URL + `/tenant/123/","token":"` + server.token + `"}`)) + server.actionRegistrationTokenHandler(w, r) return } @@ -73,10 +70,44 @@ func WithActionsToken(token string) actionsServerOption { } } +func WithRunnerRegistrationTokenHandler(h http.HandlerFunc) actionsServerOption { + return func(s *actionsServer) { + s.runnerRegistrationTokenHandler = h + } +} + +func 
WithActionsRegistrationTokenHandler(h http.HandlerFunc) actionsServerOption { + return func(s *actionsServer) { + s.actionRegistrationTokenHandler = h + } +} + type actionsServer struct { *httptest.Server - token string + token string + runnerRegistrationTokenHandler http.HandlerFunc + actionRegistrationTokenHandler http.HandlerFunc +} + +func (s *actionsServer) setDefaults(t ginkgo.GinkgoTInterface) { + if s.runnerRegistrationTokenHandler == nil { + s.runnerRegistrationTokenHandler = func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"token":"token"}`)) + } + } + + if s.actionRegistrationTokenHandler == nil { + s.actionRegistrationTokenHandler = func(w http.ResponseWriter, r *http.Request) { + if s.token == "" { + s.token = DefaultActionsToken(t) + } + + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"url":"` + s.URL + `/tenant/123/","token":"` + s.token + `"}`)) + } + } } func (s *actionsServer) ConfigURLForOrg(org string) string { From 76a534a144d62ec1c8f3ad92d2baeacced408ded Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jul 2023 12:41:55 +0200 Subject: [PATCH 243/561] chore(deps): bump github.com/stretchr/testify from 1.8.2 to 1.8.4 (#2716) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Nikola Jokic --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ff30eb96a1..a0be96e515 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/onsi/gomega v1.27.8 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 github.com/teambition/rrule-go v1.8.2 go.uber.org/multierr v1.10.0 go.uber.org/zap v1.24.0 diff --git a/go.sum b/go.sum index 210413c129..fb502a7a87 100644 --- a/go.sum +++ b/go.sum @@ 
-362,8 +362,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/teambition/rrule-go v1.8.2 h1:lIjpjvWTj9fFUZCmuoVDrKVOtdiyzbzc93qTmRVe/J8= github.com/teambition/rrule-go v1.8.2/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4= github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= From 67e4eaffe3e5e1a037ec21ddfc675eec45f52d95 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 7 Jul 2023 14:48:40 +0200 Subject: [PATCH 244/561] Updates: runner to v2.306.0 (#2727) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- Makefile | 2 +- runner/Makefile | 2 +- runner/VERSION | 2 +- test/e2e/e2e_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index eb118baf21..0b37045223 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ else endif DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1) VERSION ?= dev -RUNNER_VERSION ?= 2.305.0 +RUNNER_VERSION ?= 2.306.0 TARGETPLATFORM ?= $(shell arch) RUNNER_NAME ?= ${DOCKER_USER}/actions-runner RUNNER_TAG ?= ${VERSION} diff --git a/runner/Makefile b/runner/Makefile index 69bcb8b7d7..4f79b64f13 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= 
${DOCKER_USER}/actions-runner-dind-rootless OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) -RUNNER_VERSION ?= 2.305.0 +RUNNER_VERSION ?= 2.306.0 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.3.2 DOCKER_VERSION ?= 20.10.23 diff --git a/runner/VERSION b/runner/VERSION index 7d0c4ae36c..4220d5edd3 100644 --- a/runner/VERSION +++ b/runner/VERSION @@ -1,2 +1,2 @@ -RUNNER_VERSION=2.305.0 +RUNNER_VERSION=2.306.0 RUNNER_CONTAINER_HOOKS_VERSION=0.3.2 \ No newline at end of file diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index bd906807c7..7e9b6790ee 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -36,7 +36,7 @@ var ( testResultCMNamePrefix = "test-result-" - RunnerVersion = "2.305.0" + RunnerVersion = "2.306.0" ) // If you're willing to run this test via VS Code "run test" or "debug test", From c4f2408e09508c3aba61114df9a78474c4e52d5e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Jul 2023 13:48:27 +0200 Subject: [PATCH 245/561] Bump github.com/cloudflare/circl from 1.1.0 to 1.3.3 (#2628) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Nikola Jokic --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index a0be96e515..330378de77 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/cloudflare/circl v1.1.0 // indirect + github.com/cloudflare/circl v1.3.3 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect diff --git a/go.sum b/go.sum index fb502a7a87..b23dc6d1ea 100644 --- a/go.sum +++ b/go.sum @@ -65,8 +65,9 @@ github.com/chzyer/logex 
v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= +github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= From 3e7caace833a26fd4f2576937642c9a31c5230b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 12:07:33 +0200 Subject: [PATCH 246/561] Bump golang.org/x/net from 0.11.0 to 0.12.0 (#2750) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 330378de77..95f1ba605c 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/teambition/rrule-go v1.8.2 go.uber.org/multierr v1.10.0 go.uber.org/zap v1.24.0 - golang.org/x/net v0.11.0 + golang.org/x/net v0.12.0 golang.org/x/oauth2 v0.7.0 golang.org/x/sync v0.2.0 gomodules.xyz/jsonpatch/v2 v2.2.0 @@ -90,10 +90,10 @@ require ( github.com/stretchr/objx v0.5.0 // indirect github.com/urfave/cli v1.22.2 // 
indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/crypto v0.10.0 // indirect - golang.org/x/sys v0.9.0 // indirect - golang.org/x/term v0.9.0 // indirect - golang.org/x/text v0.10.0 // indirect + golang.org/x/crypto v0.11.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/term v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.9.3 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index b23dc6d1ea..395b89f74c 100644 --- a/go.sum +++ b/go.sum @@ -396,8 +396,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -464,8 +464,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net 
v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -535,12 +535,12 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= 
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -548,8 +548,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 461214801119c37a6a17461b0021a443e97851d8 Mon Sep 17 00:00:00 2001 From: Lars Lange <9141483+Langleu@users.noreply.github.com> Date: Tue, 25 Jul 2023 02:04:54 +0200 Subject: [PATCH 247/561] fix: remove callbacks resulting in scales due to incomplete response (#2671) Co-authored-by: Yusuke Kuoka --- .../actions.summerwind.net/autoscaling.go | 13 ++-- .../autoscaling_test.go | 77 +++++++++++++++---- 2 files changed, 71 insertions(+), 19 deletions(-) diff --git a/controllers/actions.summerwind.net/autoscaling.go b/controllers/actions.summerwind.net/autoscaling.go index ab6230511a..ea21f95321 100644 --- a/controllers/actions.summerwind.net/autoscaling.go +++ b/controllers/actions.summerwind.net/autoscaling.go @@ 
-118,10 +118,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr } var total, inProgress, queued, completed, unknown int - type callback func() - listWorkflowJobs := func(user string, repoName string, runID int64, fallback_cb callback) { + listWorkflowJobs := func(user string, repoName string, runID int64) { if runID == 0 { - fallback_cb() + // should not happen in reality + r.Log.Info("Detected run with no runID of 0, ignoring the case and not scaling.", "repo_name", repoName, "run_id", runID) return } opt := github.ListWorkflowJobsOptions{ListOptions: github.ListOptions{PerPage: 50}} @@ -139,7 +139,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr opt.Page = resp.NextPage } if len(allJobs) == 0 { - fallback_cb() + // GitHub API can return run with empty job array - should be ignored + r.Log.Info("Detected run with no jobs, ignoring the case and not scaling.", "repo_name", repoName, "run_id", runID) } else { JOB: for _, job := range allJobs { @@ -201,9 +202,9 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr case "completed": completed++ case "in_progress": - listWorkflowJobs(user, repoName, run.GetID(), func() { inProgress++ }) + listWorkflowJobs(user, repoName, run.GetID()) case "queued": - listWorkflowJobs(user, repoName, run.GetID(), func() { queued++ }) + listWorkflowJobs(user, repoName, run.GetID()) default: unknown++ } diff --git a/controllers/actions.summerwind.net/autoscaling_test.go b/controllers/actions.summerwind.net/autoscaling_test.go index ec0ac79ae2..ee42f9a467 100644 --- a/controllers/actions.summerwind.net/autoscaling_test.go +++ b/controllers/actions.summerwind.net/autoscaling_test.go @@ -61,8 +61,9 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { want int err string }{ + // case_0 // Legacy functionality - // 3 demanded, max at 3 + // 0 demanded due to zero runID, min at 2 { repo: "test/valid", min: intPtr(2), @@ -70,9 
+71,10 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`, workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`, workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`, - want: 3, + want: 2, }, - // Explicitly speified the default `self-hosted` label which is ignored by the simulator, + // case_1 + // Explicitly specified the default `self-hosted` label which is ignored by the simulator, // as we assume that GitHub Actions automatically associates the `self-hosted` label to every self-hosted runner. // 3 demanded, max at 3 { @@ -80,11 +82,17 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { labels: []string{"self-hosted"}, min: intPtr(2), max: intPtr(3), - workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`, - workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`, - workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`, - want: 3, + workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`, + workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`, + workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}]}"`, + workflowJobs: map[int]string{ + 1: `{"jobs": [{"status": "queued", "labels":["self-hosted"]}]}`, + 2: `{"jobs": [{"status": "in_progress", "labels":["self-hosted"]}]}`, + 3: `{"jobs": [{"status": "in_progress", "labels":["self-hosted"]}]}`, + }, + want: 3, }, + // 
case_2 // 2 demanded, max at 3, currently 3, delay scaling down due to grace period { repo: "test/valid", @@ -97,6 +105,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`, want: 3, }, + // case_3 // 3 demanded, max at 2 { repo: "test/valid", @@ -107,6 +116,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`, want: 2, }, + // case_4 // 2 demanded, min at 2 { repo: "test/valid", @@ -117,6 +127,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`, want: 2, }, + // case_5 // 1 demanded, min at 2 { repo: "test/valid", @@ -127,6 +138,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`, want: 2, }, + // case_6 // 1 demanded, min at 2 { repo: "test/valid", @@ -137,6 +149,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`, want: 2, }, + // case_7 // 1 demanded, min at 1 { repo: "test/valid", @@ -147,6 +160,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`, want: 1, }, + // case_8 // 1 demanded, min at 1 { repo: "test/valid", @@ -157,6 +171,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`, want: 1, }, + // case_9 // fixed at 3 { repo: "test/valid", @@ -166,9 +181,36 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) { workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, 
{"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`, workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`, workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}]}"`, - want: 3, + want: 1, + }, + // Case for empty GitHub Actions reponse - should not trigger scale up + { + description: "GitHub Actions Jobs Array is empty - no scale up", + repo: "test/valid", + min: intPtr(0), + max: intPtr(3), + workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`, + workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`, + workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`, + workflowJobs: map[int]string{ + 1: `{"jobs": []}`, + }, + want: 0, + }, + // Case for hosted GitHub Actions run + { + description: "Hosted GitHub Actions run - no scale up", + repo: "test/valid", + min: intPtr(0), + max: intPtr(3), + workflowRuns: `{"total_count": 2, "workflow_runs":[{"id": 1, "status":"queued"}, {"status":"completed"}]}"`, + workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`, + workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`, + workflowJobs: map[int]string{ + 1: `{"jobs": [{"status":"queued"}]}`, + }, + want: 0, }, - { description: "Job-level autoscaling with no explicit runner label (runners have implicit self-hosted, requested self-hosted, 5 jobs from 3 workflows)", repo: "test/valid", @@ -422,7 +464,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) { want int err string }{ - // 3 demanded, max at 3 + // case_0 + // 0 demanded due to zero runID, min at 2 { org: "test", repos: []string{"valid"}, @@ -431,8 +474,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) { workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, 
{"status":"in_progress"}, {"status":"completed"}]}"`, workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`, workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`, - want: 3, + want: 2, }, + // case_1 // 2 demanded, max at 3, currently 3, delay scaling down due to grace period { org: "test", @@ -446,6 +490,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`, want: 3, }, + // case_2 // 3 demanded, max at 2 { org: "test", @@ -457,6 +502,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`, want: 2, }, + // case_3 // 2 demanded, min at 2 { org: "test", @@ -468,6 +514,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`, want: 2, }, + // case_4 // 1 demanded, min at 2 { org: "test", @@ -479,6 +526,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`, want: 2, }, + // case_5 // 1 demanded, min at 2 { org: "test", @@ -512,6 +560,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) { workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`, want: 1, }, + // case_6 // fixed at 3 { org: "test", @@ -522,8 +571,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) { workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`, workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`, workflowRuns_in_progress: `{"total_count": 3, 
"workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`, - want: 3, + want: 1, }, + // case_7 // org runner, fixed at 3 { org: "test", @@ -534,8 +584,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) { workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`, workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`, workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`, - want: 3, + want: 1, }, + // case_8 // org runner, 1 demanded, min at 1, no repos { org: "test", From 2d9ba268da0db226b0e37c81ccf799037f0ab5c3 Mon Sep 17 00:00:00 2001 From: Gavin Williams Date: Tue, 25 Jul 2023 01:53:50 +0100 Subject: [PATCH 248/561] Fix `panic: slice bounds out of range` when runner spec contains `volumeMounts`. (#2720) Signed-off-by: Gavin Williams --- .../runner_controller.go | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/controllers/actions.summerwind.net/runner_controller.go b/controllers/actions.summerwind.net/runner_controller.go index a711fd8c71..476e5c5447 100644 --- a/controllers/actions.summerwind.net/runner_controller.go +++ b/controllers/actions.summerwind.net/runner_controller.go @@ -607,10 +607,13 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) { if runnerSpec.ContainerMode == "kubernetes" { return pod, errors.New("volume mount \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes") } - // remove work volume since it will be provided from runnerSpec.Volumes - // if we don't remove it here we would get a duplicate key error, i.e. 
two volumes named work - _, index := workVolumeMountPresent(pod.Spec.Containers[0].VolumeMounts) - pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts[:index], pod.Spec.Containers[0].VolumeMounts[index+1:]...) + + podSpecIsPresent, index := workVolumeMountPresent(pod.Spec.Containers[0].VolumeMounts) + if podSpecIsPresent { + // remove work volume since it will be provided from runnerSpec.Volumes + // if we don't remove it here we would get a duplicate key error, i.e. two volumes named work + pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts[:index], pod.Spec.Containers[0].VolumeMounts[index+1:]...) + } } pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, runnerSpec.VolumeMounts...) @@ -623,11 +626,13 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) { if runnerSpec.ContainerMode == "kubernetes" { return pod, errors.New("volume \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes") } - _, index := workVolumePresent(pod.Spec.Volumes) - // remove work volume since it will be provided from runnerSpec.Volumes - // if we don't remove it here we would get a duplicate key error, i.e. two volumes named work - pod.Spec.Volumes = append(pod.Spec.Volumes[:index], pod.Spec.Volumes[index+1:]...) + podSpecIsPresent, index := workVolumePresent(pod.Spec.Volumes) + if podSpecIsPresent { + // remove work volume since it will be provided from runnerSpec.Volumes + // if we don't remove it here we would get a duplicate key error, i.e. two volumes named work + pod.Spec.Volumes = append(pod.Spec.Volumes[:index], pod.Spec.Volumes[index+1:]...) + } } pod.Spec.Volumes = append(pod.Spec.Volumes, runnerSpec.Volumes...) 
From 58a189935e2fc0fb1df6c34e277a7abff9492bfd Mon Sep 17 00:00:00 2001 From: Daniel Kubat Date: Tue, 25 Jul 2023 02:54:09 +0200 Subject: [PATCH 249/561] Upgrade Docker Compose to v2.20.0 (#2738) --- runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile | 2 +- runner/actions-runner-dind.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner-dind.ubuntu-22.04.dockerfile | 2 +- runner/actions-runner.ubuntu-20.04.dockerfile | 2 +- runner/actions-runner.ubuntu-22.04.dockerfile | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile index f8875bc9e2..79b540d74b 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile @@ -5,7 +5,7 @@ ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ENV CHANNEL=stable -ARG DOCKER_COMPOSE_VERSION=v2.16.0 +ARG DOCKER_COMPOSE_VERSION=v2.20.0 ARG DUMB_INIT_VERSION=1.2.5 # Other arguments diff --git a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile index 06621bd7a6..506f16f657 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile @@ -5,7 +5,7 @@ ARG RUNNER_VERSION ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ENV CHANNEL=stable -ARG DOCKER_COMPOSE_VERSION=v2.16.0 +ARG DOCKER_COMPOSE_VERSION=v2.20.0 ARG DUMB_INIT_VERSION=1.2.5 ARG RUNNER_USER_UID=1001 diff --git a/runner/actions-runner-dind.ubuntu-20.04.dockerfile b/runner/actions-runner-dind.ubuntu-20.04.dockerfile index c69d3e0903..e3b96e0047 100644 --- a/runner/actions-runner-dind.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-20.04.dockerfile @@ -6,7 +6,7 @@ ARG 
RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.23 -ARG DOCKER_COMPOSE_VERSION=v2.16.0 +ARG DOCKER_COMPOSE_VERSION=v2.20.0 ARG DUMB_INIT_VERSION=1.2.5 # Use 1001 and 121 for compatibility with GitHub-hosted runners diff --git a/runner/actions-runner-dind.ubuntu-22.04.dockerfile b/runner/actions-runner-dind.ubuntu-22.04.dockerfile index 03ee37a2a1..6f252ba521 100644 --- a/runner/actions-runner-dind.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind.ubuntu-22.04.dockerfile @@ -6,7 +6,7 @@ ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.23 -ARG DOCKER_COMPOSE_VERSION=v2.16.0 +ARG DOCKER_COMPOSE_VERSION=v2.20.0 ARG DUMB_INIT_VERSION=1.2.5 ARG RUNNER_USER_UID=1001 ARG DOCKER_GROUP_GID=121 diff --git a/runner/actions-runner.ubuntu-20.04.dockerfile b/runner/actions-runner.ubuntu-20.04.dockerfile index a5c7d0a40c..c153998ea2 100644 --- a/runner/actions-runner.ubuntu-20.04.dockerfile +++ b/runner/actions-runner.ubuntu-20.04.dockerfile @@ -6,7 +6,7 @@ ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.23 -ARG DOCKER_COMPOSE_VERSION=v2.16.0 +ARG DOCKER_COMPOSE_VERSION=v2.20.0 ARG DUMB_INIT_VERSION=1.2.5 # Use 1001 and 121 for compatibility with GitHub-hosted runners diff --git a/runner/actions-runner.ubuntu-22.04.dockerfile b/runner/actions-runner.ubuntu-22.04.dockerfile index 82a43d2ca3..f5fe14927d 100644 --- a/runner/actions-runner.ubuntu-22.04.dockerfile +++ b/runner/actions-runner.ubuntu-22.04.dockerfile @@ -6,7 +6,7 @@ ARG RUNNER_CONTAINER_HOOKS_VERSION # Docker and Docker Compose arguments ARG CHANNEL=stable ARG DOCKER_VERSION=20.10.23 -ARG DOCKER_COMPOSE_VERSION=v2.16.0 +ARG DOCKER_COMPOSE_VERSION=v2.20.0 ARG DUMB_INIT_VERSION=1.2.5 ARG RUNNER_USER_UID=1001 ARG DOCKER_GROUP_GID=121 From 6cc54b8f279a657a6f3a612f2a537ec8901025b3 Mon Sep 17 00:00:00 2001 From: 
marcin-motyl <118439707+marcin-motyl@users.noreply.github.com> Date: Tue, 25 Jul 2023 02:56:20 +0200 Subject: [PATCH 250/561] Fix deployment & service values in actionsMetrics (#2683) --- .../templates/actionsmetrics.deployment.yaml | 16 ++++++++-------- .../templates/actionsmetrics.service.yaml | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml b/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml index d7cb67b239..0658a07fb2 100644 --- a/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml +++ b/charts/actions-runner-controller/templates/actionsmetrics.deployment.yaml @@ -36,8 +36,8 @@ spec: {{- end }} containers: - args: - {{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }} - {{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }} + {{- $metricsHost := .Values.actionsMetrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }} + {{- $metricsPort := .Values.actionsMetrics.proxy.enabled | ternary "8080" .Values.actionsMetrics.port }} - "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}" {{- if .Values.actionsMetricsServer.logLevel }} - "--log-level={{ .Values.actionsMetricsServer.logLevel }}" @@ -122,8 +122,8 @@ spec: - containerPort: 8000 name: http protocol: TCP - {{- if not .Values.metrics.proxy.enabled }} - - containerPort: {{ .Values.metrics.port }} + {{- if not .Values.actionsMetrics.proxy.enabled }} + - containerPort: {{ .Values.actionsMetrics.port }} name: metrics-port protocol: TCP {{- end }} @@ -131,17 +131,17 @@ spec: {{- toYaml .Values.actionsMetricsServer.resources | nindent 12 }} securityContext: {{- toYaml .Values.actionsMetricsServer.securityContext | nindent 12 }} - {{- if .Values.metrics.proxy.enabled }} + {{- if .Values.actionsMetrics.proxy.enabled }} - args: - - "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}" + - 
"--secure-listen-address=0.0.0.0:{{ .Values.actionsMetrics.port }}" - "--upstream=http://127.0.0.1:8080/" - "--logtostderr=true" - "--v=10" - image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}" + image: "{{ .Values.actionsMetrics.proxy.image.repository }}:{{ .Values.actionsMetrics.proxy.image.tag }}" name: kube-rbac-proxy imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - - containerPort: {{ .Values.metrics.port }} + - containerPort: {{ .Values.actionsMetrics.port }} name: metrics-port resources: {{- toYaml .Values.resources | nindent 12 }} diff --git a/charts/actions-runner-controller/templates/actionsmetrics.service.yaml b/charts/actions-runner-controller/templates/actionsmetrics.service.yaml index 0cfae32a6d..c465e67779 100644 --- a/charts/actions-runner-controller/templates/actionsmetrics.service.yaml +++ b/charts/actions-runner-controller/templates/actionsmetrics.service.yaml @@ -16,9 +16,9 @@ spec: {{ range $_, $port := .Values.actionsMetricsServer.service.ports -}} - {{ $port | toYaml | nindent 6 }} {{- end }} - {{- if .Values.metrics.serviceMonitor }} + {{- if .Values.actionsMetrics.serviceMonitor }} - name: metrics-port - port: {{ .Values.metrics.port }} + port: {{ .Values.actionsMetrics.port }} targetPort: metrics-port {{- end }} selector: From 8c6d29afe5d957af1ede98c9850a73f1c093a536 Mon Sep 17 00:00:00 2001 From: Ekaterina Sobolevskaia <98894503+sobolevskaya-k@users.noreply.github.com> Date: Tue, 25 Jul 2023 05:38:13 +0100 Subject: [PATCH 251/561] add opportunity write dnsPolicy for controller by helm values (#2708) --- charts/actions-runner-controller/README.md | 1 + charts/actions-runner-controller/templates/deployment.yaml | 3 +++ charts/actions-runner-controller/values.yaml | 4 ++++ 3 files changed, 8 insertions(+) diff --git a/charts/actions-runner-controller/README.md b/charts/actions-runner-controller/README.md index d291bb6fee..2ed82f96ae 100644 --- a/charts/actions-runner-controller/README.md +++ 
b/charts/actions-runner-controller/README.md @@ -35,6 +35,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do | `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API | | | `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | | | `hostNetwork` | The "hostNetwork" of the controller container | false | +| `dnsPolicy` | The "dnsPolicy" of the controller container | ClusterFirst | | `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller | | `image.tag` | The tag of the controller container | | | `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest | diff --git a/charts/actions-runner-controller/templates/deployment.yaml b/charts/actions-runner-controller/templates/deployment.yaml index 845da8356b..3490f98904 100644 --- a/charts/actions-runner-controller/templates/deployment.yaml +++ b/charts/actions-runner-controller/templates/deployment.yaml @@ -214,3 +214,6 @@ spec: {{- if .Values.hostNetwork }} hostNetwork: {{ .Values.hostNetwork }} {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} diff --git a/charts/actions-runner-controller/values.yaml b/charts/actions-runner-controller/values.yaml index 97d0eb7437..92cb3dce22 100644 --- a/charts/actions-runner-controller/values.yaml +++ b/charts/actions-runner-controller/values.yaml @@ -189,6 +189,10 @@ admissionWebHooks: # https://github.com/actions/actions-runner-controller/issues/1005#issuecomment-993097155 #hostNetwork: true +# If you use `hostNetwork: true`, then you need dnsPolicy: ClusterFirstWithHostNet +# https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy +#dnsPolicy: ClusterFirst + ## specify log format for actions runner controller. 
Valid options are "text" and "json" logFormat: text From e4970a6f9ce8fdf3640329e9e2803050ecf1a68f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Rein?= Date: Tue, 25 Jul 2023 06:45:44 +0200 Subject: [PATCH 252/561] fixed indent in a README example (#2725) --- docs/using-custom-volumes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/using-custom-volumes.md b/docs/using-custom-volumes.md index 57771c8cef..36f4c2bd61 100644 --- a/docs/using-custom-volumes.md +++ b/docs/using-custom-volumes.md @@ -78,7 +78,7 @@ spec: - hostPath: path: /mnt/disks/ssd0 name: tmp - ephemeral: true # VERY important. otherwise data inside the workdir and /tmp is not cleared between builds + ephemeral: true # VERY important. otherwise data inside the workdir and /tmp is not cleared between builds ``` ### Docker image layers caching @@ -202,4 +202,4 @@ spec: resources: requests: storage: 10Gi -``` \ No newline at end of file +``` From 11f2f376a379fc3f89f14d4707836dd1a8ccbf7e Mon Sep 17 00:00:00 2001 From: arielly-parussulo <97965681+arielly-parussulo@users.noreply.github.com> Date: Tue, 25 Jul 2023 01:59:41 -0300 Subject: [PATCH 253/561] add interval and timeout configuration for the actions-runner-controler serviceMonitors (#2654) Co-authored-by: Yusuke Kuoka --- charts/actions-runner-controller/README.md | 8 ++++++-- .../templates/actionsmetrics.service.yaml | 2 +- .../templates/actionsmetrics.servicemonitor.yaml.yml | 4 +++- .../templates/controller.metrics.serviceMonitor.yaml | 4 +++- .../templates/githubwebhook.service.yaml | 2 +- .../templates/githubwebhook.serviceMonitor.yaml | 4 +++- charts/actions-runner-controller/values.yaml | 10 ++++++++-- 7 files changed, 25 insertions(+), 9 deletions(-) diff --git a/charts/actions-runner-controller/README.md b/charts/actions-runner-controller/README.md index 2ed82f96ae..8202b7b7f9 100644 --- a/charts/actions-runner-controller/README.md +++ b/charts/actions-runner-controller/README.md @@ -42,7 +42,9 @@ All 
additional docs are kept in the `docs/` folder, this README is solely for do | `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | | | `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind | | `image.pullPolicy` | The pull policy of the controller image | IfNotPresent | -| `metrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false | +| `metrics.serviceMonitor.enable` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false | +| `metrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrap the controller's metrics | 1m | +| `metrics.serviceMonitor.timeout` | Configure the timeout the timeout of Prometheus scrapping. | 30s | | `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | | | `metrics.port` | Set port of metrics service | 8443 | | `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true | @@ -149,7 +151,9 @@ All additional docs are kept in the `docs/` folder, this README is solely for do | `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` | | `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | | | `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | | -| `actionsMetrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false | +| `actionsMetrics.serviceMonitor.enable` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false | +| `actionsMetrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrap the controller's metrics | 1m | +| `actionsMetrics.serviceMonitor.timeout` | Configure the timeout the timeout of Prometheus scrapping. 
| 30s | | `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | | | `actionsMetrics.port` | Set port of actions metrics service | 8443 | | `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true | diff --git a/charts/actions-runner-controller/templates/actionsmetrics.service.yaml b/charts/actions-runner-controller/templates/actionsmetrics.service.yaml index c465e67779..4ff8830b50 100644 --- a/charts/actions-runner-controller/templates/actionsmetrics.service.yaml +++ b/charts/actions-runner-controller/templates/actionsmetrics.service.yaml @@ -16,7 +16,7 @@ spec: {{ range $_, $port := .Values.actionsMetricsServer.service.ports -}} - {{ $port | toYaml | nindent 6 }} {{- end }} - {{- if .Values.actionsMetrics.serviceMonitor }} + {{- if .Values.actionsMetrics.serviceMonitor.enable }} - name: metrics-port port: {{ .Values.actionsMetrics.port }} targetPort: metrics-port diff --git a/charts/actions-runner-controller/templates/actionsmetrics.servicemonitor.yaml.yml b/charts/actions-runner-controller/templates/actionsmetrics.servicemonitor.yaml.yml index d25400fcaa..307a1cb2fd 100644 --- a/charts/actions-runner-controller/templates/actionsmetrics.servicemonitor.yaml.yml +++ b/charts/actions-runner-controller/templates/actionsmetrics.servicemonitor.yaml.yml @@ -1,4 +1,4 @@ -{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor }} +{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor.enable }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: @@ -19,6 +19,8 @@ spec: tlsConfig: insecureSkipVerify: true {{- end }} + interval: {{ .Values.actionsMetrics.serviceMonitor.interval }} + scrapeTimeout: {{ .Values.actionsMetrics.serviceMonitor.timeout }} selector: matchLabels: {{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . 
| nindent 6 }} diff --git a/charts/actions-runner-controller/templates/controller.metrics.serviceMonitor.yaml b/charts/actions-runner-controller/templates/controller.metrics.serviceMonitor.yaml index 07b2f3b9d0..b1ab0d90d0 100644 --- a/charts/actions-runner-controller/templates/controller.metrics.serviceMonitor.yaml +++ b/charts/actions-runner-controller/templates/controller.metrics.serviceMonitor.yaml @@ -1,4 +1,4 @@ -{{- if .Values.metrics.serviceMonitor }} +{{- if .Values.metrics.serviceMonitor.enable }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: @@ -19,6 +19,8 @@ spec: tlsConfig: insecureSkipVerify: true {{- end }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.timeout }} selector: matchLabels: {{- include "actions-runner-controller.selectorLabels" . | nindent 6 }} diff --git a/charts/actions-runner-controller/templates/githubwebhook.service.yaml b/charts/actions-runner-controller/templates/githubwebhook.service.yaml index 6835c8cf32..6ec28acfe1 100644 --- a/charts/actions-runner-controller/templates/githubwebhook.service.yaml +++ b/charts/actions-runner-controller/templates/githubwebhook.service.yaml @@ -16,7 +16,7 @@ spec: {{ range $_, $port := .Values.githubWebhookServer.service.ports -}} - {{ $port | toYaml | nindent 6 }} {{- end }} - {{- if .Values.metrics.serviceMonitor }} + {{- if .Values.metrics.serviceMonitor.enable }} - name: metrics-port port: {{ .Values.metrics.port }} targetPort: metrics-port diff --git a/charts/actions-runner-controller/templates/githubwebhook.serviceMonitor.yaml b/charts/actions-runner-controller/templates/githubwebhook.serviceMonitor.yaml index f659cc421b..81d9b59135 100644 --- a/charts/actions-runner-controller/templates/githubwebhook.serviceMonitor.yaml +++ b/charts/actions-runner-controller/templates/githubwebhook.serviceMonitor.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor }} +{{- if 
and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor.enable }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: @@ -19,6 +19,8 @@ spec: tlsConfig: insecureSkipVerify: true {{- end }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.timeout }} selector: matchLabels: {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }} diff --git a/charts/actions-runner-controller/values.yaml b/charts/actions-runner-controller/values.yaml index 92cb3dce22..1a719f8dc8 100644 --- a/charts/actions-runner-controller/values.yaml +++ b/charts/actions-runner-controller/values.yaml @@ -109,7 +109,10 @@ service: # Metrics service resource metrics: serviceAnnotations: {} - serviceMonitor: false + serviceMonitor: + enable: false + timeout: 30s + interval: 1m serviceMonitorLabels: {} port: 8443 proxy: @@ -308,7 +311,10 @@ actionsMetrics: # as a part of the helm release. # Do note that you also need actionsMetricsServer.enabled=true # to deploy the actions-metrics-server whose k8s service is referenced by the service monitor. 
- serviceMonitor: false + serviceMonitor: + enable: false + timeout: 30s + interval: 1m serviceMonitorLabels: {} port: 8443 proxy: From 138031c362cb7e049e61f25a5560e2793202d904 Mon Sep 17 00:00:00 2001 From: Thorsten Wildberger Date: Tue, 25 Jul 2023 06:59:49 +0200 Subject: [PATCH 254/561] feat: allow more dockerd options (#2701) --- docs/using-entrypoint-features.md | 43 +++++++++++++++++++ ...nner-dind-rootless.ubuntu-20.04.dockerfile | 4 ++ ...nner-dind-rootless.ubuntu-22.04.dockerfile | 4 ++ runner/entrypoint-dind-rootless.sh | 1 - 4 files changed, 51 insertions(+), 1 deletion(-) diff --git a/docs/using-entrypoint-features.md b/docs/using-entrypoint-features.md index 98ff04f7bd..432b4d4db7 100644 --- a/docs/using-entrypoint-features.md +++ b/docs/using-entrypoint-features.md @@ -66,4 +66,47 @@ spec: value: "172.17.0.0/12" - name: DOCKER_DEFAULT_ADDRESS_POOL_SIZE value: "24" +``` + +More options can be configured by mounting a configmap to the daemon.json location: + +- rootless: /home/runner/.config/docker/daemon.json +- rootful: /etc/docker/daemon.json + +```yaml +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: example-runnerdeployment +spec: + template: + spec: + dockerdWithinRunnerContainer: true + image: summerwind/actions-runner-dind(-rootless) + volumeMounts: + - mountPath: /home/runner/.config/docker/daemon.json + name: daemon-config-volume + subPath: daemon.json + volumes: + - name: daemon-config-volume + configMap: + name: daemon-cm + items: + - key: daemon.json + path: daemon.json + securityContext: + fsGroup: 1001 # runner user id +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: daemon-cm +data: + daemon.json: | + { + "log-level": "warn", + "dns": ["x.x.x.x"] + } ``` \ No newline at end of file diff --git a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile index 79b540d74b..0991c26540 100644 --- 
a/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-20.04.dockerfile @@ -146,5 +146,9 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && which docker-compose \ && docker compose version +# Create folder structure here to avoid permission issues +# when mounting the daemon.json file from a configmap. +RUN mkdir -p /home/runner/.config/docker + ENTRYPOINT ["/bin/bash", "-c"] CMD ["entrypoint-dind-rootless.sh"] diff --git a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile index 506f16f657..68e11e0a90 100644 --- a/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile +++ b/runner/actions-runner-dind-rootless.ubuntu-22.04.dockerfile @@ -123,5 +123,9 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \ && which docker-compose \ && docker compose version +# Create folder structure here to avoid permission issues +# when mounting the daemon.json file from a configmap. +RUN mkdir -p /home/runner/.config/docker + ENTRYPOINT ["/bin/bash", "-c"] CMD ["entrypoint-dind-rootless.sh"] diff --git a/runner/entrypoint-dind-rootless.sh b/runner/entrypoint-dind-rootless.sh index 668de49251..9c8cc01150 100644 --- a/runner/entrypoint-dind-rootless.sh +++ b/runner/entrypoint-dind-rootless.sh @@ -5,7 +5,6 @@ trap graceful_stop TERM log.notice "Writing out Docker config file" /bin/bash <