diff --git a/.github/workflows/go-ci.yml b/.github/workflows/go-ci.yml
new file mode 100644
index 0000000000..3fdc914fb8
--- /dev/null
+++ b/.github/workflows/go-ci.yml
@@ -0,0 +1,99 @@
+name: Go CI
+
+on:
+ push:
+ branches: ["lab03", "master"]
+ paths:
+ - "app_go/**"
+ - ".github/workflows/go-ci.yml"
+ pull_request:
+ branches: ["lab03", "master"]
+ paths:
+ - "app_go/**"
+ - ".github/workflows/go-ci.yml"
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: app_go
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-go@v5
+ with:
+ go-version: "1.22"
+ cache: true
+
+ - name: Go fmt check
+ run: |
+ test -z "$(gofmt -l .)"
+
+      - name: Run tests with coverage
+        # working-directory is already app_go via the job-level defaults above
+        run: go test -coverprofile=coverage.out ./...
+
+ - name: Upload coverage artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: go-coverage
+ path: app_go/coverage.out
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v5
+ with:
+ files: app_go/coverage.out
+ flags: go
+ env:
+ CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+
+ lint:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: app_go
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-go@v5
+ with:
+ go-version: "1.22"
+ cache: true
+
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v6
+ with:
+ version: v1.61.0
+ working-directory: app_go
+
+ docker:
+ needs: [test, lint]
+ runs-on: ubuntu-latest
+ if: github.event_name == 'push' && (github.ref == 'refs/heads/lab03' || github.ref == 'refs/heads/master')
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_TOKEN }}
+
+ - name: Generate version
+ run: echo "VERSION=$(date -u +%Y.%m.%d)" >> $GITHUB_ENV
+
+ - name: Build and push
+ uses: docker/build-push-action@v6
+ with:
+ context: ./app_go
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/go_app:${{ env.VERSION }}
+ ${{ secrets.DOCKER_USERNAME }}/go_app:${{ github.sha }}
+ ${{ secrets.DOCKER_USERNAME }}/go_app:latest
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml
new file mode 100644
index 0000000000..f0ed5ca75c
--- /dev/null
+++ b/.github/workflows/python-ci.yml
@@ -0,0 +1,99 @@
+name: Python CI
+
+
+on:
+ push:
+ branches: [ "lab03", "master" ]
+ paths:
+ - 'app_python/**'
+ - '.github/workflows/python-ci.yml'
+ pull_request:
+ branches: [ "lab03", "master" ]
+ paths:
+ - 'app_python/**'
+ - '.github/workflows/python-ci.yml'
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+ with:
+          python-version: "3.12"
+ cache: "pip"
+ cache-dependency-path: app_python/requirements.txt
+
+ - name: Install dependencies
+ run: pip install -r app_python/requirements.txt
+
+      - name: Run linter
+        run: flake8 app_python
+
+ - name: Run tests
+ working-directory: app_python
+ run: pytest --cov=. --cov-report=xml:coverage.xml --cov-report=term
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v5
+ with:
+ files: app_python/coverage.xml
+ fail_ci_if_error: true
+ flags: python
+ env:
+ CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+
+ security:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+ cache: "pip"
+ cache-dependency-path: app_python/requirements.txt
+
+ - name: Install dependencies
+ run: pip install -r app_python/requirements.txt
+
+ - name: Setup Snyk CLI
+ uses: snyk/actions/setup@master
+
+ - name: Run Snyk (dependencies)
+ working-directory: app_python
+ env:
+ SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
+ run: snyk test --severity-threshold=high
+
+ docker:
+ needs: [ test, security ]
+ runs-on: ubuntu-latest
+ if: github.event_name == 'push' && (github.ref == 'refs/heads/lab03' || github.ref == 'refs/heads/master')
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_TOKEN }}
+
+ - name: Generate version
+ run: echo "VERSION=$(date -u +%Y.%m.%d)" >> $GITHUB_ENV
+
+ - name: Build and push
+ uses: docker/build-push-action@v6
+ with:
+ context: ./app_python
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/python_app:${{ env.VERSION }}
+ ${{ secrets.DOCKER_USERNAME }}/python_app:${{ github.sha }}
+ ${{ secrets.DOCKER_USERNAME }}/python_app:latest
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+
diff --git a/.github/workflows/terraform-ci.yml b/.github/workflows/terraform-ci.yml
new file mode 100644
index 0000000000..6931a90b72
--- /dev/null
+++ b/.github/workflows/terraform-ci.yml
@@ -0,0 +1,94 @@
+name: Terraform CI
+
+on:
+ pull_request:
+ paths:
+ - 'terraform/**'
+ - '.github/workflows/terraform-ci.yml'
+ push:
+ branches:
+ - lab04
+ paths:
+ - 'terraform/**'
+ - '.github/workflows/terraform-ci.yml'
+
+jobs:
+ terraform-validate:
+ name: Terraform Validation
+ runs-on: ubuntu-latest
+
+ defaults:
+ run:
+ working-directory: terraform
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Setup Terraform
+ uses: hashicorp/setup-terraform@v3
+ with:
+ terraform_version: 1.9.0
+
+ - name: Terraform Format Check
+ id: fmt
+ run: terraform fmt -check -recursive
+ continue-on-error: true
+
+ - name: Terraform Init
+ id: init
+ run: terraform init -backend=false
+
+      - name: Terraform Validate
+        id: validate
+        continue-on-error: true
+        run: set -o pipefail; terraform validate -no-color | tee validate.txt
+
+ - name: Setup TFLint
+ uses: terraform-linters/setup-tflint@v4
+ with:
+ tflint_version: latest
+
+ - name: Initialize TFLint
+ run: tflint --init
+
+ - name: Run TFLint
+ id: tflint
+ run: tflint --format compact
+ continue-on-error: true
+
+ - name: Comment PR with Results
+ if: github.event_name == 'pull_request'
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const fs = require('fs');
+ const validateOut = fs.existsSync('terraform/validate.txt')
+ ? fs.readFileSync('terraform/validate.txt', 'utf8')
+ : 'No validation output';
+
+            const output = `#### Terraform Format and Style: \`${{ steps.fmt.outcome }}\`
+            #### Terraform Initialization: \`${{ steps.init.outcome }}\`
+            #### Terraform Validation: \`${{ steps.validate.outcome }}\`
+            #### TFLint: \`${{ steps.tflint.outcome }}\`
+
+ Show Validation Output
+
+ \`\`\`
+ ${validateOut}
+ \`\`\`
+
+
+
+ *Pusher: @${{ github.actor }}, Action: \`${{ github.event_name }}\`, Workflow: \`${{ github.workflow }}\`*`;
+
+ github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: output
+ })
+
+ - name: Fail if validation failed
+ if: steps.fmt.outcome == 'failure' || steps.validate.outcome == 'failure'
+ run: exit 1
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 30d74d2584..cd53c583d7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,70 @@
-test
\ No newline at end of file
+# General
+test
+.DS_Store
+*.log
+
+# Terraform
+*.terraform.lock.hcl
+*.tfstate
+*.tfstate.*
+.terraform/
+terraform.tfvars
+*.tfvars
+crash.log
+crash.*.log
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# Pulumi
+pulumi/venv/
+pulumi/__pycache__/
+Pulumi.*.yaml
+.pulumi/
+
+# Cloud credentials
+*.pem
+*.key
+*credentials*.json
+credentials
+.aws/
+.azure/
+.gcp/
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+env/
+venv/
+ENV/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Node
+node_modules/
+npm-debug.log
+yarn-error.log
\ No newline at end of file
diff --git a/app_go/.dockerignore b/app_go/.dockerignore
new file mode 100644
index 0000000000..52c65fe81d
--- /dev/null
+++ b/app_go/.dockerignore
@@ -0,0 +1,9 @@
+.git
+*.md
+docs/
+bin/
+dist/
+tmp/
+.idea/
+.vscode/
+.DS_Store
\ No newline at end of file
diff --git a/app_go/.gitignore b/app_go/.gitignore
new file mode 100644
index 0000000000..d6c75d54e3
--- /dev/null
+++ b/app_go/.gitignore
@@ -0,0 +1,18 @@
+# Go binaries / build output
+devops-info-service
+*.exe
+*.out
+*.test
+bin/
+dist/
+
+# IDE/editor
+.vscode/
+.idea/
+
+# OS files
+.DS_Store
+Thumbs.db
+
+# Logs
+*.log
\ No newline at end of file
diff --git a/app_go/Dockerfile b/app_go/Dockerfile
new file mode 100644
index 0000000000..6dd906a964
--- /dev/null
+++ b/app_go/Dockerfile
@@ -0,0 +1,14 @@
+# Stage 1: Builder
+FROM golang:1.22-alpine AS builder
+WORKDIR /app
+COPY go.mod ./
+RUN go mod download
+COPY . .
+RUN CGO_ENABLED=0 go build -o myapp
+
+# Stage 2: Runtime
+FROM gcr.io/distroless/static-debian12:nonroot
+WORKDIR /app
+COPY --from=builder /app/myapp .
+EXPOSE 8080
+CMD ["./myapp"]
\ No newline at end of file
diff --git a/app_go/README.md b/app_go/README.md
new file mode 100644
index 0000000000..f2870f9688
--- /dev/null
+++ b/app_go/README.md
@@ -0,0 +1,47 @@
+[![Go CI](https://github.com/newspec/DevOps-Core-Course/actions/workflows/go-ci.yml/badge.svg?branch=lab03)](https://github.com/newspec/DevOps-Core-Course/actions/workflows/go-ci.yml?query=branch%3Alab03)
+[![codecov](https://codecov.io/gh/newspec/DevOps-Core-Course/branch/lab03/graph/badge.svg?flag=go)](https://codecov.io/gh/newspec/DevOps-Core-Course/branch/lab03?flag=go)
+
+
+# devops-info-service (Go)
+
+## Overview
+`devops-info-service` is a lightweight HTTP service written in Go. It returns:
+- service metadata (name, version, description, framework),
+- system information (hostname, OS/platform, architecture, CPU count, Go version),
+- runtime information (uptime, current UTC time),
+- request information (client IP, user-agent, method, path),
+- a list of available endpoints.
+
+This is useful for DevOps labs and basic observability: quick environment inspection and health checks.
+
+---
+
+## Prerequisites
+- **Go:** 1.22+ (recommended)
+- No external dependencies (standard library only)
+
+---
+
+## Installation
+```bash
+cd app_go
+go mod tidy
+```
+
+## Running the Application
+```bash
+go run .
+```
+
+## API Endpoints
+- `GET /` - Service and system information
+- `GET /health` - Health check
+
+## Configuration
+
+The application is configured using environment variables.
+
+| Variable | Default | Description | Example |
+|---------|---------|-------------|---------|
+| `HOST` | `0.0.0.0` | Host interface to bind the server to | `0.0.0.0` |
+| `PORT` | `8080` | Port the server listens on | `8080` |
diff --git a/app_go/docs/GO.md b/app_go/docs/GO.md
new file mode 100644
index 0000000000..39785012a7
--- /dev/null
+++ b/app_go/docs/GO.md
@@ -0,0 +1,5 @@
+### Why Go?
+- **Compiled binary**: produces a single executable (useful for multi-stage Docker builds).
+- **Fast startup and low overhead**: good for microservices.
+- **Standard library is enough**: `net/http` covers routing and HTTP server without external frameworks.
+- **Great DevOps fit**: simple deployment, small runtime requirements.
diff --git a/app_go/docs/LAB01.md b/app_go/docs/LAB01.md
new file mode 100644
index 0000000000..52920f06bc
--- /dev/null
+++ b/app_go/docs/LAB01.md
@@ -0,0 +1,217 @@
+# Lab 1 (Bonus) — DevOps Info Service in Go
+
+## 1. Language / Framework Selection
+
+### Choice
+I implemented the bonus service in **Go** using the standard library **net/http** package.
+
+### Why Go?
+- **Compiled binary**: produces a single executable (useful for multi-stage Docker builds).
+- **Fast startup and low overhead**: good for microservices.
+- **Standard library is enough**: `net/http` covers routing and HTTP server without external frameworks.
+- **Great DevOps fit**: simple deployment, small runtime requirements.
+
+### Comparison with Alternatives
+
+| Criteria | Go (net/http) (chosen) | Rust | Java (Spring Boot) | C# (ASP.NET Core) |
+|---------|--------------------------|------|---------------------|-------------------|
+| Build artifact | Single binary | Single binary | JVM app + deps | .NET app + deps |
+| Startup time | Fast | Fast | Usually slower | Medium |
+| Runtime deps | None | None | JVM required | .NET runtime |
+| HTTP stack | stdlib | frameworks (Axum/Actix) | Spring ecosystem | ASP.NET stack |
+| Complexity | Low | Mediumβhigh | Medium | Medium |
+| Best fit for this lab | Excellent | Good | Overkill | Good |
+
+---
+
+## 2. Best Practices Applied
+
+### 2.1 Clean Code Organization
+- Clear data models (`ServiceInfo`, `Service`, `System`, `RuntimeInfo`, `RequestInfo`, `Endpoint`).
+- Helper functions for concerns separation:
+ - `runtimeInfo()`, `requestInfo()`, `uptime()`, `isoUTCNow()`, `clientIP()`, `writeJSON()`.
+
+### 2.2 Configuration via Environment Variables
+The service is configurable via environment variables:
+- `HOST` (default `0.0.0.0`)
+- `PORT` (default `8080`)
+- `DEBUG` (default `false`)
+
+Implementation uses a simple helper:
+```go
+func getenv(key, def string) string {
+ v := os.Getenv(key)
+ if v == "" {
+ return def
+ }
+ return v
+}
+```
+
+### 2.3 Logging Middleware
+Request logging is implemented as middleware:
+```go
+func withLogging(logger *log.Logger) func(http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ start := time.Now()
+ next.ServeHTTP(w, r)
+ logger.Printf("%s %s (%s) from %s in %s",
+ r.Method, r.URL.Path, r.Proto, r.RemoteAddr, time.Since(start))
+ })
+ }
+}
+```
+
+### 2.4 Error Handling
+#### 404 Not Found
+Unknown endpoints return a consistent JSON error:
+```json
+{
+ "error": "Not Found",
+ "message": "Endpoint does not exist"
+}
+```
+This is implemented via a wrapper that enforces valid paths:
+```go
+func withNotFound(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" && r.URL.Path != "/health" {
+ writeJSON(w, http.StatusNotFound, ErrorResponse{
+ Error: "Not Found",
+ Message: "Endpoint does not exist",
+ })
+ return
+ }
+ next.ServeHTTP(w, r)
+ })
+}
+```
+#### 500 Internal Server Error (panic recovery)
+A recover middleware prevents crashes and returns a safe JSON response:
+```go
+func withRecover(logger *log.Logger) func(http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if rec := recover(); rec != nil {
+ logger.Printf("panic recovered: %v", rec)
+ writeJSON(w, http.StatusInternalServerError, ErrorResponse{
+ Error: "Internal Server Error",
+ Message: "An unexpected error occurred",
+ })
+ }
+ }()
+ next.ServeHTTP(w, r)
+ })
+ }
+}
+```
+### 2.5 Production-Friendly HTTP Server Settings
+The service uses `http.Server` with timeouts:
+```go
+srv := &http.Server{
+ Addr: addr,
+ Handler: handler,
+ ReadHeaderTimeout: 5 * time.Second,
+}
+```
+## 3. API Documentation
+### 3.1 GET / — Service and System Information
+**Description**: Returns service metadata, system info, runtime info, request info, and available endpoints.
+
+**Request**:
+```bash
+curl -i http://127.0.0.1:8080/
+```
+**Response (200 OK) example**:
+```json
+{
+ "service": {
+ "name": "devops-info-service",
+ "version": "1.0.0",
+ "description": "DevOps course info service",
+ "framework": "Go net/http"
+ },
+ "system": {
+ "hostname": "DESKTOP-KUN1CI4",
+ "platform": "windows",
+ "platform_version": "unknown",
+ "architecture": "amd64",
+ "cpu_count": 8,
+ "go_version": "go1.25.6"
+ },
+ "runtime": {
+ "uptime_seconds": 6,
+ "uptime_human": "0 hours, 0 minutes",
+ "current_time": "2026-01-25T17:17:32.248Z",
+ "timezone": "UTC"
+ },
+ "request": {
+ "client_ip": "::1",
+ "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/144.0.0.0 Safari/537.36 Edg/144.0.0.0",
+ "method": "GET",
+ "path": "/"
+ },
+ "endpoints": [
+ {
+ "path": "/",
+ "method": "GET",
+ "description": "Service information"
+ },
+ {
+ "path": "/health",
+ "method": "GET",
+ "description": "Health check"
+ }
+ ]
+}
+```
+### 3.2 GET /health — Health Check
+**Description**: Simple health endpoint used for monitoring and probes.
+
+**Request**:
+```bash
+curl -i http://127.0.0.1:8080/health
+```
+**Response (200 OK) example**:
+```json
+{
+ "status": "healthy",
+ "timestamp": "2026-01-25T17:19:02.582Z",
+ "uptime_seconds": 96
+}
+```
+### 3.3 404 Behavior
+**Request**:
+```bash
+curl -i http://127.0.0.1:8080/does-not-exist
+```
+**Response (404 Not Found)**:
+```json
+{
+ "error": "Not Found",
+ "message": "Endpoint does not exist"
+}
+```
+
+## 4. Build & Run Instructions
+### 4.1 Run locally (no build)
+```bash
+go run main.go
+```
+### 4.2 Build binary
+```bash
+go build -o devops-info-service main.go
+```
+Run:
+```bash
+./devops-info-service
+```
+### 4.3 Environment variables examples
+```bash
+HOST=127.0.0.1 PORT=3000 ./devops-info-service
+DEBUG=true PORT=8081 ./devops-info-service
+```
+## 5. Challenges & Solutions
+I don't know how `go` works.
diff --git a/app_go/docs/LAB02.md b/app_go/docs/LAB02.md
new file mode 100644
index 0000000000..ee98527851
--- /dev/null
+++ b/app_go/docs/LAB02.md
@@ -0,0 +1,215 @@
+# Multi-stage build strategy
+## Stage 1 - Builder
+**Purpose:** compile the Go application using a full Go toolchain image.
+
+**Key points:**
+- Uses `golang:1.22-alpine` to keep the builder stage smaller than Debian-based images.
+- Copies `go.mod` first and runs `go mod download` to maximize Docker layer caching.
+- Builds a Linux binary with `CGO_ENABLED=0` (static binary), which allows a minimal runtime image.
+
+**Dockerfile snippet:**
+```dockerfile
+FROM golang:1.22-alpine AS builder
+WORKDIR /app
+COPY go.mod ./
+RUN go mod download
+COPY . .
+RUN CGO_ENABLED=0 go build -o myapp
+```
+
+## Stage 2 - Runtime
+**Purpose:** run only the compiled binary in a minimal image with a non-root user.
+
+**Key points:**
+- Uses `gcr.io/distroless/static-debian12:nonroot`.
+- Distroless images contain only what is required to run the app (no package manager, no shell), reducing image size and attack surface.
+- Runs as a **non-root** user (provided by the `:nonroot` tag).
+
+**Dockerfile snippet:**
+```dockerfile
+FROM gcr.io/distroless/static-debian12:nonroot
+WORKDIR /app
+COPY --from=builder /app/myapp .
+EXPOSE 8080
+CMD ["./myapp"]
+```
+
+# Size comparison with analysis (builder vs final image)
+## Builder
+```bash
+docker images newspec/app_go:builder
+REPOSITORY TAG IMAGE ID CREATED SIZE
+newspec/app_go builder 21b01f8c6103 37 minutes ago 305MB
+```
+## Final
+```bash
+docker images newspec/app_go:1.0
+REPOSITORY TAG IMAGE ID CREATED SIZE
+newspec/app_go 1.0 a944205e6030 59 minutes ago 9.32MB
+```
+## Analysis
+The builder image contains the Go toolchain and build dependencies, so it is significantly larger.
+The final image contains only the static binary, which is much smaller and safer.
+
+# Why multi-stage builds matter for compiled languages
+
+Compiled languages (like Go, Rust, Java with native images, etc.) typically require a **heavy build environment**: compilers, linkers, SDKs, package managers, and temporary build artifacts. If you ship that same environment as your runtime container, the final image becomes unnecessarily large and less secure.
+
+Multi-stage builds solve this by separating concerns:
+
+### 1) Smaller final images (faster pull & deploy)
+- The **builder stage** includes the full toolchain (large).
+- The **runtime stage** contains only the compiled output (usually just a single binary).
+This dramatically reduces image size, which improves:
+- CI/CD speed (less time to push/pull)
+- Startup speed (faster distribution in clusters)
+- Bandwidth/storage usage
+
+### 2) Better security (smaller attack surface)
+The runtime image no longer contains:
+- compilers (go, gcc, build tools)
+- package managers
+- shells and utilities (especially with distroless/scratch)
+
+Fewer components, so fewer potential vulnerabilities (CVEs) and fewer tools available to an attacker if the container is compromised.
+
+### 3) Cleaner separation of build vs runtime
+Multi-stage builds enforce a clear boundary:
+- build dependencies exist only where needed (builder stage)
+- runtime image stays minimal and focused on execution
+
+This makes the container easier to reason about and maintain.
+
+### 4) Reproducible and cache-friendly builds
+When the Dockerfile copies dependency descriptors first (e.g., `go.mod`/`go.sum`) and downloads dependencies before copying source code:
+- Docker can reuse cached layers if dependencies are unchanged
+- rebuilds after code changes are significantly faster
+
+### 5) Enables ultra-minimal runtime images
+Compiled apps can often run in very small base images:
+- `distroless` (secure and minimal)
+- `scratch` (almost empty)
+
+This is usually impossible for interpreted languages without bundling an interpreter runtime.
+
+**Summary:** For compiled languages, multi-stage builds provide the best of both worlds β a full build environment when you need it, and a minimal secure runtime image when you deploy.
+
+# Terminal output showing build process
+## Builder
+```bash
+docker build --target builder -t newspec/app_go:builder .
+[+] Building 2.4s (12/12) FINISHED docker:desktop-linux
+ => [internal] load build definition from Dockerfile 0.0s
+ => => transferring dockerfile: 349B 0.0s
+ => [internal] load metadata for docker.io/library/golang:1.22-alpine 2.1s
+ => [auth] library/golang:pull token for registry-1.docker.io 0.0s
+ => [internal] load .dockerignore 0.1s
+ => => transferring context: 105B 0.0s
+ => [builder 1/6] FROM docker.io/library/golang:1.22-alpine@sha256:1699c10032ca2582ec89a24a1312d986a3f094aed3d5c1147b19880afe40e052 0.0s
+ => [internal] load build context 0.0s
+ => => transferring context: 146B 0.0s
+ => CACHED [builder 2/6] WORKDIR /app 0.0s
+ => CACHED [builder 3/6] COPY go.mod ./ 0.0s
+ => CACHED [builder 4/6] RUN go mod download 0.0s
+ => CACHED [builder 5/6] COPY . . 0.0s
+ => CACHED [builder 6/6] RUN CGO_ENABLED=0 go build -o myapp 0.0s
+ => exporting to image 0.0s
+ => => exporting layers 0.0s
+ => => writing image sha256:21b01f8c61038149b9130afe7881765d625b2eb6622b6b46f42682d26b10ae2b 0.0s
+ => => naming to docker.io/newspec/app_go:builder 0.0s
+
+View build details: docker-desktop://dashboard/build/desktop-linux/desktop-linux/wvw3g1yzoqu1uput2mz8me7zx
+```
+
+## Final
+```bash
+docker build -t newspec/app_go:1.0 .
+[+] Building 1.5s (15/15) FINISHED docker:desktop-linux
+ => [internal] load build definition from Dockerfile 0.0s
+ => => transferring dockerfile: 349B 0.0s
+ => [internal] load metadata for gcr.io/distroless/static-debian12:nonroot 1.0s
+ => [internal] load metadata for docker.io/library/golang:1.22-alpine 0.7s
+ => [internal] load .dockerignore 0.0s
+ => => transferring context: 105B 0.0s
+ => [builder 1/6] FROM docker.io/library/golang:1.22-alpine@sha256:1699c10032ca2582ec89a24a1312d986a3f094aed3d5c1147b19880afe40e052 0.0s
+ => [stage-1 1/3] FROM gcr.io/distroless/static-debian12:nonroot@sha256:cba10d7abd3e203428e86f5b2d7fd5eb7d8987c387864ae4996cf97191b33764 0.0s
+ => [internal] load build context 0.0s
+ => => transferring context: 146B 0.0s
+ => CACHED [builder 2/6] WORKDIR /app 0.0s
+ => CACHED [builder 3/6] COPY go.mod ./ 0.0s
+ => CACHED [builder 4/6] RUN go mod download 0.0s
+ => CACHED [builder 5/6] COPY . . 0.0s
+ => CACHED [builder 6/6] RUN CGO_ENABLED=0 go build -o myapp 0.0s
+ => CACHED [stage-1 2/3] WORKDIR /app 0.0s
+ => CACHED [stage-1 3/3] COPY --from=builder /app/myapp . 0.0s
+ => exporting to image 0.1s
+ => => exporting layers 0.0s
+ => => writing image sha256:c9ff1572d8a13240f00ef7d66683264e0fbf4fa77c12790dc3f3428972819321 0.0s
+ => => naming to docker.io/newspec/app_go:1.0 0.0s
+
+View build details: docker-desktop://dashboard/build/desktop-linux/desktop-linux/nvdyhylzo1hzpemy23lt42ll1
+```
+# Technical explanation of each stage's purpose
+### Stage 1 — Builder (Compile Environment)
+**Goal:** Produce a Linux executable from Go source code in a controlled build environment.
+
+**Why this stage exists:**
+- Go compilation requires the Go toolchain (compiler, linker) which is large and should not be shipped in the final runtime image.
+- The builder image provides everything needed to compile the application.
+
+**What happens technically:**
+1. **Set working directory**
+ - `WORKDIR /app` defines where source code and build steps run inside the container.
+
+2. **Copy dependency definition first**
+ - `COPY go.mod ./` is done before copying the whole source tree.
+ - This allows Docker to cache the dependency download layer.
+ - Even if code changes, dependencies may not, so rebuilds are faster.
+
+3. **Download modules**
+ - `RUN go mod download` fetches required modules.
+ - In this project there are no external module dependencies, so Go prints:
+ `go: no module dependencies to download`
+ - The step is still good practice and keeps the Dockerfile consistent for future changes.
+
+4. **Copy application source code**
+ - `COPY . .` brings in the Go source files.
+ - This layer changes most often, so it comes after dependency caching steps.
+
+5. **Compile a static binary**
+ - `RUN CGO_ENABLED=0 go build -o myapp`
+ - `CGO_ENABLED=0` disables C bindings so the binary is statically linked.
+ - A static binary does not require libc or other runtime shared libraries, enabling minimal runtime images.
+
+**Output of the stage:** a compiled executable (`/app/myapp`).
+
+---
+
+### Stage 2 — Runtime (Execution Environment)
+**Goal:** Run only the compiled binary in a minimal and secure container image.
+
+**Why this stage exists:**
+- The runtime stage should not contain compilers, source code, or build tools.
+- A smaller runtime image reduces attack surface and improves deployment speed.
+
+**What happens technically:**
+1. **Choose a minimal base image**
+ - `FROM gcr.io/distroless/static-debian12:nonroot`
+ - Distroless images contain only the minimum required runtime files.
+ - The `:nonroot` variant runs as a non-root user by default.
+
+2. **Set working directory**
+ - `WORKDIR /app` provides a predictable location for the binary.
+
+3. **Copy only the build artifact**
+ - `COPY --from=builder /app/myapp .`
+ - This copies only the compiled binary from the builder stage.
+ - No source code, no Go toolchain, no dependency caches are included.
+
+4. **Run the application**
+ - `CMD ["./myapp"]` starts the service.
+ - The application reads `HOST` and `PORT` environment variables:
+ - defaults: `HOST=0.0.0.0`, `PORT=8080`
+ - When running the container, port mapping must match the internal listening port (e.g., `-p 8000:8080`).
+
+**Output of the stage:** a minimal runtime container that executes the Go binary as a non-root user.
\ No newline at end of file
diff --git a/app_go/docs/LAB03.md b/app_go/docs/LAB03.md
new file mode 100644
index 0000000000..e039089a71
--- /dev/null
+++ b/app_go/docs/LAB03.md
@@ -0,0 +1,86 @@
+# LAB03 (Go) — Bonus: Multi-App CI + Path Filters + Coverage
+
+## 1) Second workflow implementation (Go CI) + language-specific best practices
+
+A separate workflow file is added for the Go application:
+
+- `.github/workflows/go-ci.yml`
+
+It implements Go-specific CI best practices:
+
+- **Setup Go toolchain** via `actions/setup-go` (Go 1.22+)
+- **Formatting check**: `gofmt -l .` must return empty output
+- **Linting**: `golangci-lint` (industry-standard Go linter aggregator)
+- **Unit tests**: `go test ./...`
+- **Coverage generation**: `go test -coverprofile=coverage.out ./...`
+- **Docker build/push**: multi-stage Docker build using the existing `app_go/Dockerfile` (builder stage + distroless runtime)
+
+Docker image tagging follows the same CalVer strategy as Python:
+
+- `YYYY.MM.DD` (CalVer)
+- `${GITHUB_SHA}` (commit SHA)
+- `latest`
+
+## 2) Path filter configuration + testing proof
+
+The Go workflow is triggered only when Go-related files change:
+
+- `app_go/**`
+- `.github/workflows/go-ci.yml`
+
+This prevents unnecessary CI runs for unrelated parts of the monorepo.
+
+### Proof (selective triggering)
+
+Provide evidence with 2 small commits:
+
+1) **Change only Go files**
+ - https://github.com/newspec/DevOps-Core-Course/actions/runs/21837847722
+
+2) **Change only Python files**
+ - https://github.com/newspec/DevOps-Core-Course/actions/runs/21838134121
+
+## 3) Benefits analysis β why path filters matter in monorepos
+
+Path filters are important in monorepos because they:
+
+- **Save CI time and compute**: no need to run Go CI when only Python changes (and vice versa)
+- **Reduce noise in PR checks**: fewer irrelevant checks, faster feedback for reviewers
+- **Improve developer experience**: faster iteration and fewer "unrelated failures"
+- **Scale better** as more apps/labs are added to the same repository
+
+## 4) Example showing workflows running independently
+
+Example scenario:
+
+- Commit A modifies only `app_go/**` → only Go CI runs
+- Commit B modifies only `app_python/**` → only Python CI runs
+- Commit C modifies both `app_go/**` and `app_python/**` → both workflows run in parallel
+
+
+
+## 5) Terminal output / Actions evidence (selective triggering)
+
+See links and screenshot above.
+
+## 6) Coverage integration (dashboard link / screenshot)
+
+
+## 7) Coverage analysis (current percentage, covered/not covered, threshold)
+
+### Current coverage
+
+Current Go coverage: 44%
+Current Python coverage: 99%
+
+### What is covered (Go)
+- `GET /` handler (`mainHandler`) returns status `200` and contains required top-level JSON keys:
+ `service`, `system`, `runtime`, `request`, `endpoints`
+- `GET /health` handler (`healthHandler`) returns status `200` and contains required keys:
+ `status`, `timestamp`, `uptime_seconds`
+
+### What is not covered (Go)
+- Middleware behavior (`withRecover`, `withLogging`, `withNotFound`)
+- Negative/error scenarios (e.g., unknown path via middleware, panic recovery)
+- Strict validation of dynamic fields (timestamps formatting beyond basic checks)
+
diff --git a/app_go/docs/screenshots/01-main-endpoint.png b/app_go/docs/screenshots/01-main-endpoint.png
new file mode 100644
index 0000000000..c0f1b2ed5c
Binary files /dev/null and b/app_go/docs/screenshots/01-main-endpoint.png differ
diff --git a/app_go/docs/screenshots/02-health-check.png b/app_go/docs/screenshots/02-health-check.png
new file mode 100644
index 0000000000..aed7a37918
Binary files /dev/null and b/app_go/docs/screenshots/02-health-check.png differ
diff --git a/app_go/docs/screenshots/03-formatted-output.png b/app_go/docs/screenshots/03-formatted-output.png
new file mode 100644
index 0000000000..ec011296fc
Binary files /dev/null and b/app_go/docs/screenshots/03-formatted-output.png differ
diff --git a/app_go/docs/screenshots/coverage.png b/app_go/docs/screenshots/coverage.png
new file mode 100644
index 0000000000..1c9c2434ef
Binary files /dev/null and b/app_go/docs/screenshots/coverage.png differ
diff --git a/app_go/docs/screenshots/proof_of_path_validations.png b/app_go/docs/screenshots/proof_of_path_validations.png
new file mode 100644
index 0000000000..00df9e7997
Binary files /dev/null and b/app_go/docs/screenshots/proof_of_path_validations.png differ
diff --git a/app_go/go.mod b/app_go/go.mod
new file mode 100644
index 0000000000..43fa976d01
--- /dev/null
+++ b/app_go/go.mod
@@ -0,0 +1,3 @@
+module devops-info-service
+
+go 1.22
\ No newline at end of file
diff --git a/app_go/main.go b/app_go/main.go
new file mode 100644
index 0000000000..1e5f3f1abe
--- /dev/null
+++ b/app_go/main.go
@@ -0,0 +1,277 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "runtime"
+ "strings"
+ "time"
+)
+
+type ServiceInfo struct {
+ Service Service `json:"service"`
+ System System `json:"system"`
+ Runtime RuntimeInfo `json:"runtime"`
+ Request RequestInfo `json:"request"`
+ Endpoints []Endpoint `json:"endpoints"`
+}
+
+type Service struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Description string `json:"description"`
+ Framework string `json:"framework"`
+}
+
+type System struct {
+ Hostname string `json:"hostname"`
+ Platform string `json:"platform"`
+ PlatformVersion string `json:"platform_version"`
+ Architecture string `json:"architecture"`
+ CPUCount int `json:"cpu_count"`
+ GoVersion string `json:"go_version"`
+}
+
+type RuntimeInfo struct {
+ UptimeSeconds int `json:"uptime_seconds"`
+ UptimeHuman string `json:"uptime_human"`
+ CurrentTime string `json:"current_time"`
+ Timezone string `json:"timezone"`
+}
+
+type RequestInfo struct {
+ ClientIP string `json:"client_ip"`
+ UserAgent string `json:"user_agent"`
+ Method string `json:"method"`
+ Path string `json:"path"`
+}
+
+type Endpoint struct {
+ Path string `json:"path"`
+ Method string `json:"method"`
+ Description string `json:"description"`
+}
+
+type ErrorResponse struct {
+ Error string `json:"error"`
+ Message string `json:"message"`
+}
+
+var startTime = time.Now().UTC()
+
+func main() {
+ host := getenv("HOST", "0.0.0.0")
+ port := getenv("PORT", "8080")
+ debug := strings.ToLower(getenv("DEBUG", "false")) == "true"
+
+ logger := log.New(os.Stdout, "", log.LstdFlags)
+ if debug {
+ logger.SetFlags(log.LstdFlags | log.Lshortfile)
+ }
+
+ mux := http.NewServeMux()
+
+ // endpoints
+ mux.HandleFunc("/", mainHandler)
+ mux.HandleFunc("/health", healthHandler)
+
+ // wrap with middleware: recover + logging + 404
+ handler := withRecover(logger)(withLogging(logger)(withNotFound(mux)))
+
+ addr := fmt.Sprintf("%s:%s", host, port)
+ logger.Printf("Application starting on http://%s\n", addr)
+
+ // http.Server allows timeouts (good practice)
+ srv := &http.Server{
+ Addr: addr,
+ Handler: handler,
+ ReadHeaderTimeout: 5 * time.Second,
+ }
+
+ if err := srv.ListenAndServe(); err != nil {
+ logger.Fatalf("server error: %v", err)
+ }
+}
+
+func mainHandler(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ // will be caught by notFound wrapper, but this is extra safety
+ writeJSON(w, http.StatusNotFound, ErrorResponse{
+ Error: "Not Found",
+ Message: "Endpoint does not exist",
+ })
+ return
+ }
+
+ info := ServiceInfo{
+ Service: Service{
+ Name: "devops-info-service",
+ Version: "1.0.0",
+ Description: "DevOps course info service",
+ Framework: "Go net/http",
+ },
+ System: System{
+ Hostname: hostname(),
+ Platform: runtime.GOOS,
+ PlatformVersion: platformVersion(),
+ Architecture: runtime.GOARCH,
+ CPUCount: runtime.NumCPU(),
+ GoVersion: runtime.Version(),
+ },
+ Runtime: runtimeInfo(),
+ Request: requestInfo(r),
+ Endpoints: []Endpoint{
+ {Path: "/", Method: "GET", Description: "Service information"},
+ {Path: "/health", Method: "GET", Description: "Health check"},
+ },
+ }
+
+ writeJSON(w, http.StatusOK, info)
+}
+
+func healthHandler(w http.ResponseWriter, r *http.Request) {
+ uptimeSeconds, _ := uptime()
+ resp := map[string]any{
+ "status": "healthy",
+ "timestamp": isoUTCNow(),
+ "uptime_seconds": uptimeSeconds,
+ }
+ writeJSON(w, http.StatusOK, resp)
+}
+
+func runtimeInfo() RuntimeInfo {
+ secs, human := uptime()
+ return RuntimeInfo{
+ UptimeSeconds: secs,
+ UptimeHuman: human,
+ CurrentTime: isoUTCNow(),
+ Timezone: "UTC",
+ }
+}
+
+func requestInfo(r *http.Request) RequestInfo {
+ ip := clientIP(r)
+ ua := r.Header.Get("User-Agent")
+ return RequestInfo{
+ ClientIP: ip,
+ UserAgent: ua,
+ Method: r.Method,
+ Path: r.URL.Path,
+ }
+}
+
+func uptime() (int, string) {
+ delta := time.Since(startTime)
+ seconds := int(delta.Seconds())
+ hours := seconds / 3600
+ minutes := (seconds % 3600) / 60
+ return seconds, fmt.Sprintf("%d hours, %d minutes", hours, minutes)
+}
+
+func isoUTCNow() string {
+ // "2026-01-07T14:30:00.000Z"
+ return time.Now().UTC().Format("2006-01-02T15:04:05.000Z")
+}
+
+func hostname() string {
+ h, err := os.Hostname()
+ if err != nil {
+ return "unknown"
+ }
+ return h
+}
+
+func platformVersion() string {
+ // Best effort for Linux: /etc/os-release PRETTY_NAME (e.g., "Ubuntu 24.04.1 LTS")
+ if runtime.GOOS != "linux" {
+ return "unknown"
+ }
+ data, err := os.ReadFile("/etc/os-release")
+ if err != nil {
+ return "unknown"
+ }
+ lines := strings.Split(string(data), "\n")
+ for _, line := range lines {
+ if strings.HasPrefix(line, "PRETTY_NAME=") {
+ val := strings.TrimPrefix(line, "PRETTY_NAME=")
+ val = strings.Trim(val, `"`)
+ if val != "" {
+ return val
+ }
+ }
+ }
+ return "unknown"
+}
+
+func clientIP(r *http.Request) string {
+ // If behind proxy, you might consider X-Forwarded-For, but for lab keep it simple.
+ host, _, err := net.SplitHostPort(r.RemoteAddr)
+ if err == nil && host != "" {
+ return host
+ }
+ // fallback: may already be just an IP
+ return r.RemoteAddr
+}
+
+func writeJSON(w http.ResponseWriter, statusCode int, payload any) {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(statusCode)
+ _ = json.NewEncoder(w).Encode(payload)
+}
+
+func getenv(key, def string) string {
+ v := os.Getenv(key)
+ if v == "" {
+ return def
+ }
+ return v
+}
+
+/* ---------------- Middleware (Best Practices) ---------------- */
+
+func withLogging(logger *log.Logger) func(http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ start := time.Now()
+ next.ServeHTTP(w, r)
+ logger.Printf("%s %s (%s) from %s in %s",
+ r.Method, r.URL.Path, r.Proto, r.RemoteAddr, time.Since(start))
+ })
+ }
+}
+
+func withRecover(logger *log.Logger) func(http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if rec := recover(); rec != nil {
+ logger.Printf("panic recovered: %v", rec)
+ writeJSON(w, http.StatusInternalServerError, ErrorResponse{
+ Error: "Internal Server Error",
+ Message: "An unexpected error occurred",
+ })
+ }
+ }()
+ next.ServeHTTP(w, r)
+ })
+ }
+}
+
+func withNotFound(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Use ServeMux; if it doesn't match, it still calls handler with pattern "/"
+ // So we enforce our own 404 for unknown endpoints.
+ if r.URL.Path != "/" && r.URL.Path != "/health" {
+ writeJSON(w, http.StatusNotFound, ErrorResponse{
+ Error: "Not Found",
+ Message: "Endpoint does not exist",
+ })
+ return
+ }
+ next.ServeHTTP(w, r)
+ })
+}
diff --git a/app_go/main_test.go b/app_go/main_test.go
new file mode 100644
index 0000000000..a273d1f240
--- /dev/null
+++ b/app_go/main_test.go
@@ -0,0 +1,56 @@
+package main
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestRootOK(t *testing.T) {
+ req := httptest.NewRequest(http.MethodGet, "/", nil)
+ rr := httptest.NewRecorder()
+
+ mainHandler(rr, req)
+
+ if rr.Code != http.StatusOK {
+ t.Fatalf("expected 200, got %d", rr.Code)
+ }
+
+ var data map[string]any
+ if err := json.Unmarshal(rr.Body.Bytes(), &data); err != nil {
+ t.Fatalf("invalid json: %v", err)
+ }
+
+ // top-level keys
+ for _, k := range []string{"service", "system", "runtime", "request", "endpoints"} {
+ if _, ok := data[k]; !ok {
+ t.Fatalf("missing key: %s", k)
+ }
+ }
+}
+
+func TestHealthOK(t *testing.T) {
+ req := httptest.NewRequest(http.MethodGet, "/health", nil)
+ rr := httptest.NewRecorder()
+
+ healthHandler(rr, req)
+
+ if rr.Code != http.StatusOK {
+ t.Fatalf("expected 200, got %d", rr.Code)
+ }
+
+ var data map[string]any
+ if err := json.Unmarshal(rr.Body.Bytes(), &data); err != nil {
+ t.Fatalf("invalid json: %v", err)
+ }
+
+ for _, k := range []string{"status", "timestamp", "uptime_seconds"} {
+ if _, ok := data[k]; !ok {
+ t.Fatalf("missing key: %s", k)
+ }
+ }
+ if data["status"] != "healthy" {
+ t.Fatalf("expected status healthy, got %v", data["status"])
+ }
+}
diff --git a/app_python/.dockerignore b/app_python/.dockerignore
new file mode 100644
index 0000000000..44fa25304b
--- /dev/null
+++ b/app_python/.dockerignore
@@ -0,0 +1,22 @@
+# Version control
+.git
+.gitignore
+
+# Python
+__pycache__
+*.pyc
+*.pyo
+venv/
+.venv/
+
+# Secrets (NEVER include!)
+.env
+*.pem
+secrets/
+
+# Documentation
+*.md
+docs/
+
+# Tests (if not needed in container)
+tests/
\ No newline at end of file
diff --git a/app_python/.gitignore b/app_python/.gitignore
new file mode 100644
index 0000000000..4de420a8f7
--- /dev/null
+++ b/app_python/.gitignore
@@ -0,0 +1,12 @@
+# Python
+__pycache__/
+*.py[cod]
+venv/
+*.log
+
+# IDE
+.vscode/
+.idea/
+
+# OS
+.DS_Store
\ No newline at end of file
diff --git a/app_python/Dockerfile b/app_python/Dockerfile
new file mode 100644
index 0000000000..0364cfd9a7
--- /dev/null
+++ b/app_python/Dockerfile
@@ -0,0 +1,14 @@
+FROM python:3.12-slim
+
+RUN useradd --create-home --shell /bin/bash appuser
+
+WORKDIR /app
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+COPY app.py .
+
+EXPOSE 8000
+
+USER appuser
+
+CMD ["python", "app.py"]
\ No newline at end of file
diff --git a/app_python/README.md b/app_python/README.md
new file mode 100644
index 0000000000..170f1cac89
--- /dev/null
+++ b/app_python/README.md
@@ -0,0 +1,76 @@
+[](https://github.com/newspec/DevOps-Core-Course/actions/workflows/python-ci.yml?query=branch%3Alab03)
+[](https://codecov.io/gh/newspec/DevOps-Core-Course/branch/lab03?flag=python)
+
+
+# devops-info-service
+
+## Overview
+`devops-info-service` is a lightweight HTTP service built with **FastAPI** that returns comprehensive runtime and system information. It exposes:
+- service metadata (name, version, description, framework),
+- system details (hostname, OS/platform, architecture, CPU count, Python version),
+- runtime data (uptime, current UTC time),
+- request details (client IP, user-agent, method, path),
+- a list of available endpoints.
+
+## Prerequisites
+- **Python:** 3.10+ (recommended 3.11+)
+- **Dependencies:** listed in `requirements.txt`
+
+## Installation
+
+```
+python -m venv venv
+source venv/bin/activate
+pip install -r requirements.txt
+```
+
+## Running the Application
+```
+python app.py
+# Or with custom config
+PORT=8080 python app.py
+```
+
+## API Endpoints
+- `GET /` - Service and system information
+- `GET /health` - Health check
+
+## Configuration
+
+The application is configured using environment variables.
+
+| Variable | Default | Description | Example |
+|---------|---------|-------------|-------------|
+| `HOST` | `0.0.0.0` | Host interface to bind the server to | `127.0.0.1` |
+| `PORT` | `8000` | Port the server listens on | `8080` |
+
+# Docker
+
+## Building the image locally
+Command pattern:
+```bash
+docker build -t <image-name>:<tag> .
+```
+
+## Running a container
+Command pattern:
+```bash
+docker run --rm -p <host-port>:<container-port> <image-name>:<tag>
+```
+
+## Pulling from Docker Hub
+Command pattern:
+```bash
+docker pull <dockerhub-username>/<image-name>:<tag>
+```
+Then run:
+```bash
+docker run --rm -p <host-port>:<container-port> <dockerhub-username>/<image-name>:<tag>
+```
+
+# Testing
+To run the tests locally, use the command:
+```bash
+pytest
+```
+
diff --git a/app_python/app.py b/app_python/app.py
new file mode 100644
index 0000000000..8cd20e0680
--- /dev/null
+++ b/app_python/app.py
@@ -0,0 +1,162 @@
+"""
+DevOps Info Service
+Main application module
+"""
+import logging
+import os
+import platform
+import socket
+from datetime import datetime, timezone
+
+import uvicorn
+from fastapi import FastAPI, Request, status
+from fastapi.responses import JSONResponse
+from starlette.exceptions import HTTPException
+
+app = FastAPI()
+
+# Configuration
+HOST = os.getenv("HOST", "0.0.0.0")
+PORT = int(os.getenv("PORT", 8000))
+DEBUG = os.getenv("DEBUG", "False").lower() == "true"
+
+logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+# Application start time
+start_time = datetime.now()
+
+
+def get_service_info():
+ """Get information about service."""
+ logger.debug('Getting info about the service.')
+ return {
+ "name": "devops-info-service",
+ "version": "1.0.0",
+ "description": "DevOps course info service",
+ "framework": "FastAPI",
+ }
+
+
+def get_system_info():
+ """Get information about system."""
+ logger.debug('Getting info about the system.')
+ return {
+ "hostname": socket.gethostname(),
+ "platform": platform.system(),
+ "platform_version": platform.version(),
+ "architecture": platform.machine(),
+ "cpu_count": os.cpu_count(),
+ "python_version": platform.python_version(),
+ }
+
+
+def get_uptime():
+ """Get uptime."""
+ logger.debug('Getting uptime.')
+ delta = datetime.now() - start_time
+ seconds = int(delta.total_seconds())
+ hours = seconds // 3600
+ minutes = (seconds % 3600) // 60
+ return {"seconds": seconds, "human": f"{hours} hours, {minutes} minutes"}
+
+
+def get_runtime_info():
+ """Get information about runtime."""
+ logger.debug('Getting runtime info.')
+ uptime = get_uptime()
+ uptime_seconds, uptime_human = uptime["seconds"], uptime["human"]
+ current_time = datetime.now(timezone.utc)
+
+ return {
+ "uptime_seconds": uptime_seconds,
+ "uptime_human": uptime_human,
+ "current_time": current_time,
+ "timezone": "UTC",
+ }
+
+
+def get_request_info(request: Request):
+ """Get information about request."""
+ logger.debug('Getting info about request.')
+ return {
+ "client_ip": request.client.host,
+ "user_agent": request.headers.get("user-agent"),
+ "method": request.method,
+ "path": request.url.path,
+ }
+
+
+def get_endpoints():
+ """Get all existing ednpoints."""
+ logger.debug('Getting list of all endpoints.')
+ return [
+ {"path": "/", "method": "GET", "description": "Service information"},
+ {"path": "/health", "method": "GET", "description": "Health check"},
+ ]
+
+
+@app.get("/", status_code=status.HTTP_200_OK)
+async def root(request: Request):
+ """Main endpoint - service and system information."""
+ logger.debug(f'Request: {request.method} {request.url.path}')
+ return {
+ "service": get_service_info(),
+ "system": get_system_info(),
+ "runtime": get_runtime_info(),
+ "request": get_request_info(request),
+ "endpoints": get_endpoints(),
+ }
+
+
+@app.get("/health", status_code=status.HTTP_200_OK)
+async def health(request: Request):
+ """Endpoint to check health."""
+ logger.debug(f'Request: {request.method} {request.url.path}')
+ return {
+ "status": "healthy",
+ "timestamp": datetime.now(timezone.utc),
+ "uptime_seconds": get_uptime()["seconds"],
+ }
+
+
+@app.exception_handler(HTTPException)
+async def http_exception_handler(request: Request, exc: HTTPException):
+ """Exception 404 (Not found) that endpoint does not exists."""
+ if exc.status_code == 404:
+ return JSONResponse(
+ status_code=404,
+ content={
+ "error": "Not Found",
+ "message": "Endpoint does not exist",
+ },
+ )
+ return JSONResponse(
+ status_code=exc.status_code,
+ content={
+ "error": "HTTP Error",
+ "message": exc.detail if exc.detail else "Request failed",
+ },
+ )
+
+
+@app.exception_handler(Exception)
+async def unhandled_exception_handler(request: Request, exc: Exception):
+ """Exception 500 (Internal Server Error) - For any unhandled errors."""
+ return JSONResponse(
+ status_code=500,
+ content={
+ "error": "Internal Server Error",
+ "message": "An unexpected error occurred",
+ },
+ )
+
+
+if __name__ == "__main__":
+ # The entry point
+ logger.info('Application starting...')
+
+ uvicorn.run("app:app", host=HOST, port=PORT, reload=True)
diff --git a/app_python/docs/LAB01.md b/app_python/docs/LAB01.md
new file mode 100644
index 0000000000..1d43f6863d
--- /dev/null
+++ b/app_python/docs/LAB01.md
@@ -0,0 +1,341 @@
+# 1. Framework selection.
+## Choice
+I chose FastAPI because I already have experience with it and no experience with the other frameworks.
+
+## Comparison with Alternatives
+
+| Criteria | FastAPI (chosen) | Flask | Django (DRF) |
+|---------|-----------------|-------|--------------|
+| Primary use | APIs / microservices | Lightweight web apps & APIs | Full-stack apps & large APIs |
+| Performance model | ASGI (async-ready) | WSGI (sync by default) | WSGI/ASGI (heavier stack) |
+| Built-in API docs | Yes (Swagger/OpenAPI) | No (manual/add-ons) | Yes (via DRF) |
+| Validation / typing | Strong (type hints + Pydantic) | Manual or extensions | Strong (serializers) |
+| Boilerplate | Low | Very low | Higher |
+| Learning curve | Low–medium | Low | Medium–high |
+| Best fit for this lab | Excellent | Good | Overkill |
+
+---
+
+# 2. Best Practices Applied
+## Clean Code Organization
+
+### 1) Clear Function Names
+The code uses descriptive, intention-revealing function names that clearly communicate what each block returns:
+
+```python
+def get_service_info():
+ """Get information about service."""
+ ...
+
+def get_system_info():
+ """Get information about system."""
+ ...
+
+def get_runtime_info():
+ """Get information about runtime."""
+ ...
+
+def get_request_info(request: Request):
+ """Get information about request."""
+ ...
+```
+**Why it matters**: Clear naming improves readability, reduces the need for extra comments, and makes the code easier to maintain and extend.
+
+### 2) Proper imports grouping
+Imports are organized by category (standard library first, then third-party libraries), which is the common Python convention:
+```python
+import logging
+import os
+import platform
+import socket
+from datetime import datetime, timezone
+
+import uvicorn
+from fastapi import FastAPI, Request, status
+from fastapi.responses import JSONResponse
+from starlette.exceptions import HTTPException
+```
+**Why it matters**: Grouped imports make dependencies easier to understand at a glance, help keep the file structured, and align with typical linting rules.
+
+### 3) Comments only where needed
+Instead of excessive inline comments, the code relies on clear names and short docstrings:
+```python
+"""
+DevOps Info Service
+Main application module
+"""
+
+def get_uptime():
+ """Get uptime."""
+ ...
+```
+**Why it matters**: Too many comments can become outdated. Minimal documentation plus clean naming keeps the codebase readable and accurate.
+
+### 4) Follow PEP 8
+The implementation follows common PEP 8 practices:
+- consistent indentation and spacing,
+- snake_case for variables and function names,
+- configuration/constants placed near the top of the module (HOST, PORT, DEBUG),
+- readable multi-line formatting for long calls:
+```python
+"""
+logging.basicConfig(
+ level=logging.INFO,
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+)
+```
+**Why it matters**: PEP 8 improves consistency, supports teamwork, and makes the code compatible with linters/formatters such as `flake8`, `ruff`, and `black`.
+
+## Error Handling
+The service implements centralized error handling using FastAPI/Starlette exception handlers. This ensures that errors are returned in a consistent JSON format and that clients receive meaningful messages instead of raw stack traces.
+
+### HTTP errors (e.g., 404 Not Found)
+A dedicated handler processes HTTP-related exceptions and customizes the response for missing endpoints.
+
+```python
+from starlette.exceptions import HTTPException
+
+@app.exception_handler(HTTPException)
+async def http_exception_handler(request: Request, exc: HTTPException):
+ if exc.status_code == 404:
+ return JSONResponse(
+ status_code=404,
+ content={
+ "error": "Not Found",
+ "message": "Endpoint does not exist",
+ },
+ )
+
+ return JSONResponse(
+ status_code=exc.status_code,
+ content={
+ "error": "HTTP Error",
+ "message": exc.detail if exc.detail else "Request failed",
+ },
+ )
+```
+**Why it matters**:
+- Provides a clear and user-friendly message for invalid routes.
+- Keeps error responses consistent across the API.
+- Avoids exposing internal implementation details to the client.
+
+### Unhandled exceptions (500 Internal Server Error)
+A global handler catches any unexpected exceptions and returns a safe, standardized response.
+
+```python
+@app.exception_handler(Exception)
+async def unhandled_exception_handler(request: Request, exc: Exception):
+ return JSONResponse(
+ status_code=500,
+ content={
+ "error": "Internal Server Error",
+ "message": "An unexpected error occurred",
+ },
+ )
+```
+**Why it matters**:
+- Prevents server crashes from unhandled errors.
+- Ensures clients always receive valid JSON (important for automation/scripts).
+- Helps keep production behavior predictable while preserving the option to log the exception internally.
+
+## 3. Logging
+The service includes basic logging configuration to improve observability and simplify debugging. Logs are useful both during development (troubleshooting requests and behavior) and in production (monitoring, incident investigation).
+
+### Logging setup
+A global logging configuration is defined at startup with a consistent log format:
+```python
+import logging
+
+logging.basicConfig(
+ level=logging.INFO,
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+```
+**Why it matters**:
+- Provides timestamps and log levels for easier troubleshooting.
+- A consistent format makes logs easier to parse in log aggregators (e.g., ELK, Loki).
+- Centralized config avoids inconsistent logging across modules.
+
+### Startup logging
+The application logs an informational message when it starts:
+```python
+if __name__ == "__main__":
+ logger.info("Application starting...")
+ uvicorn.run("app:app", host=HOST, port=PORT, reload=True)
+```
+**Why it matters**:
+- Confirms that the service started successfully.
+- Helps identify restarts and uptime issues.
+
+### Request logging (debug level)
+Each endpoint logs basic request information (method and path):
+```python
+@app.get("/", status_code=status.HTTP_200_OK)
+async def root(request: Request):
+ logger.debug(f"Request: {request.method} {request.url.path}")
+ ...
+```
+**Why it matters**:
+- Helps trace API usage during development.
+- Useful for debugging routing problems and unexpected client behavior.
+
+## 4. Dependencies (requirements.txt)
+The project keeps dependencies minimal and focused on what is required to run a FastAPI service in production.
+### requirements.txt
+```txt
+fastapi==0.122.0
+uvicorn[standard]==0.38.0
+```
+**Why it matters**:
+- Faster builds & simpler setup: fewer packages mean faster installation and fewer moving parts.
+- Lower risk of conflicts: minimal dependencies reduce version incompatibilities and "dependency hell".
+- Better security posture: fewer third-party libraries reduce the overall attack surface.
+- More predictable deployments: only installing what the service truly needs improves reproducibility across environments (local, CI, Docker, VM).
+
+## 5. Git Ignore (.gitignore)
+
+A `.gitignore` file is used to prevent committing temporary, machine-specific, or sensitive files into the repository.
+
+### Recommended `.gitignore`
+```gitignore
+# Python
+__pycache__/
+*.py[cod]
+venv/
+*.log
+
+# IDE
+.vscode/
+.idea/
+
+# OS
+.DS_Store
+```
+**Why it matters**:
+- Keeps the repository clean: avoids committing generated files (`__pycache__`, build outputs, logs).
+- Improves portability: prevents OS- and IDE-specific files from polluting the project and causing noisy diffs.
+- Protects secrets: ensures configuration files like `.env` (which may contain API keys or credentials) are not accidentally pushed.
+- Reduces merge conflicts: fewer irrelevant files tracked by Git means fewer conflicts between contributors.
+
+# 3. API Documentation
+The service exposes two endpoints: the main information endpoint and a health check endpoint.
+## Request/response examples
+### GET `/` β Service and System Information
+**Description:**
+Returns comprehensive metadata about the service, system, runtime, request details, and available endpoints.
+**Request example:**
+```bash
+curl -i http://127.0.0.1:8000/
+```
+**Response example (200 OK):**
+```json
+{
+ "service": {
+ "name": "devops-info-service",
+ "version": "1.0.0",
+ "description": "DevOps course info service",
+ "framework": "FastAPI"
+ },
+ "system": {
+ "hostname": "my-laptop",
+ "platform": "Linux",
+ "platform_version": "Ubuntu 24.04",
+ "architecture": "x86_64",
+ "cpu_count": 8,
+ "python_version": "3.13.1"
+ },
+ "runtime": {
+ "uptime_seconds": 3600,
+ "uptime_human": "1 hour, 0 minutes",
+ "current_time": "2026-01-07T14:30:00.000Z",
+ "timezone": "UTC"
+ },
+ "request": {
+ "client_ip": "127.0.0.1",
+ "user_agent": "curl/7.81.0",
+ "method": "GET",
+ "path": "/"
+ },
+ "endpoints": [
+ {"path": "/", "method": "GET", "description": "Service information"},
+ {"path": "/health", "method": "GET", "description": "Health check"}
+ ]
+}
+```
+### GET /health β Health Check
+**Description:**
+Returns a simple status response to confirm the service is running. **Request example:**
+```bash
+curl -i http://127.0.0.1:8000/health
+```
+**Response example (200 OK):**
+```json
+{
+ "status": "healthy",
+ "timestamp": "2024-01-15T14:30:00.000Z",
+ "uptime_seconds": 3600
+}
+```
+## Testing commands
+### Basic tests
+```bash
+curl http://127.0.0.1:8000/
+curl http://127.0.0.1:8000/health
+```
+### Test 404 handling (unknown endpoint)
+```bash
+curl -i http://127.0.0.1:8000/does-not-exist
+```
+Expected response (404):
+```json
+{
+ "error": "Not Found",
+ "message": "Endpoint does not exist"
+}
+```
+
+# 4. Testing Evidence
+Check screenshots.
+
+# 5. Challenges & Solutions
+I had no problems in this lab.
+
+# GitHub Community
+**Why Stars Matter:**
+
+**Discovery & Bookmarking:**
+- Stars help you bookmark interesting projects for later reference
+- Star count indicates project popularity and community trust
+- Starred repos appear in your GitHub profile, showing your interests
+
+**Open Source Signal:**
+- Stars encourage maintainers (shows appreciation)
+- High star count attracts more contributors
+- Helps projects gain visibility in GitHub search and recommendations
+
+**Professional Context:**
+- Shows you follow best practices and quality projects
+- Indicates awareness of industry tools and trends
+
+**Why Following Matters:**
+
+**Networking:**
+- See what other developers are working on
+- Discover new projects through their activity
+- Build professional connections beyond the classroom
+
+**Learning:**
+- Learn from others' code and commits
+- See how experienced developers solve problems
+- Get inspiration for your own projects
+
+**Collaboration:**
+- Stay updated on classmates' work
+- Easier to find team members for future projects
+- Build a supportive learning community
+
+**Career Growth:**
+- Follow thought leaders in your technology stack
+- See trending projects in real-time
+- Build visibility in the developer community
\ No newline at end of file
diff --git a/app_python/docs/LAB02.md b/app_python/docs/LAB02.md
new file mode 100644
index 0000000000..977e742f10
--- /dev/null
+++ b/app_python/docs/LAB02.md
@@ -0,0 +1,243 @@
+# Docker Best Practices Applied
+
+## 1. Non-root user
+**What I did:**
+Created a dedicated user (`appuser`) and ran the container as that user.
+
+**Why it matters**:
+Running as root in a container increases the impact of a container escape or a vulnerable dependency. A non-root user reduces privileges and limits potential damage.
+
+**Dockerfile snippet:**
+```dockerfile
+RUN useradd --create-home --shell /bin/bash appuser
+USER appuser
+```
+
+## 2. Layer caching
+**What I did:**
+Copied `requirements.txt` first and installed dependencies before copying the rest of the source code.
+
+**Why it matters**:
+Docker caches layers. Dependencies change less frequently than application code, so separating them allows rebuilds to reuse the cached dependency layer and rebuild faster.
+
+**Dockerfile snippet:**
+```dockerfile
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+COPY app.py .
+```
+
+## 3. .dockerignore
+**What I did:**
+Added .dockerignore to exclude local artifacts such as venvs, caches, and IDE folders.
+
+**Why it matters**:
+Docker sends the build context to the daemon. Excluding unnecessary files makes builds faster, reduces image bloat, and prevents leaking local secrets/files into the build.
+
+**.dockerignore snippet:**
+```.dockerignore
+# Version control
+.git
+.gitignore
+
+# Python
+__pycache__
+*.pyc
+*.pyo
+venv/
+.venv/
+
+# Secrets (NEVER include!)
+.env
+*.pem
+secrets/
+
+# Documentation
+*.md
+docs/
+
+# Tests (if not needed in container)
+tests/
+```
+
+## 4. Minimal base image
+**What I did:**
+Used `python:3.12-slim`.
+
+**Why it matters**:
+Smaller images generally mean fewer packages, fewer vulnerabilities, less bandwidth and faster pulls/deployments.
+
+**dockerfile snippet:**
+```dockerfile
+FROM python:3.12-slim
+```
+
+# Image Information & Decisions
+## Base image chosen and justification (why this specific version?)
+**Base image**:
+`python:3.12-slim`
+
+**Justification**:
+- Python 3.12 matches the project runtime requirements.
+- slim variant keeps image smaller and reduces OS packages.
+- Official Python images are widely used and well maintained.
+
+## Final image size and my assessment
+**Image size**:
+`195MB`
+
+**My assessment**:
+Further optimization is possible (multi-stage build, wheels caching, removing build deps), but for this lab the size is acceptable.
+
+## Layer structure explanation
+1. Base image layer (`python:3.12-slim`)
+2. User creation layer
+3. `WORKDIR` and `requirements.txt` copy layer
+4. pip install dependencies layer (largest and most valuable for caching)
+5. Application source copy layer
+6. EXPOSE, USER, and CMD metadata layers
+
+## Optimization choices
+- Used dependency-first copying to maximize caching.
+- Used `--no-cache-dir` with pip to reduce layer size.
+- Used slim base image to reduce OS footprint.
+- Added `.dockerignore` to reduce build context.
+
+# Build & Run Process
+## Complete terminal output from build process
+```bash
+ docker build -t python_app:1.0 .
+[+] Building 5.4s (12/12) FINISHED docker:desktop-linux
+ => [internal] load build definition from Dockerfile 0.0s
+ => => transferring dockerfile: 277B 0.0s
+ => [internal] load metadata for docker.io/library/python:3.12-slim 3.7s
+ => [auth] library/python:pull token for registry-1.docker.io 0.0s
+ => [internal] load .dockerignore 0.0s
+ => => transferring context: 289B 0.0s
+ => [1/6] FROM docker.io/library/python:3.12-slim@sha256:5e2dbd4bbdd9c0e67412aea9463906f74a22c60f89e 0.0s
+ => [internal] load build context 0.1s
+ => => transferring context: 62.28kB 0.1s
+ => CACHED [2/6] RUN useradd --create-home --shell /bin/bash appuser 0.0s
+ => CACHED [3/6] WORKDIR /app 0.0s
+ => CACHED [4/6] COPY requirements.txt . 0.0s
+ => CACHED [5/6] RUN pip install --no-cache-dir -r requirements.txt 0.0s
+ => [6/6] COPY app.py . 0.8s
+ => exporting to image 0.4s
+ => => exporting layers 0.3s
+ => => writing image sha256:a3d1dd41a468a1bb53d02edd846964c240eb160f49fd28e9f6ad90fc15677c52 0.0s
+ => => naming to docker.io/library/python_app:1.0 0.0s
+
+View build details: docker-desktop://dashboard/build/desktop-linux/desktop-linux/djiu836gkk9g7syuakivz5ynt
+```
+
+## Terminal output showing container running
+```bash
+ docker run --rm -p 8000:8000 python_app:1.0
+2026-01-31 18:29:56,977 - __main__ - INFO - Application starting...
+INFO: Will watch for changes in these directories: ['/app']
+INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
+INFO: Started reloader process [1] using WatchFiles
+INFO: Started server process [8]
+INFO: Waiting for application startup.
+INFO: Application startup complete.
+```
+
+## Terminal output from testing endpoints
+```bash
+curl http://localhost:8000/
+
+ StatusCode : 200
+StatusDescription : OK
+Content : {"service":{"name":"devops-info-service","version":"1.0.0","description":"DevOps cours
+ e info service","framework":"FastAPI"},"system":{"hostname":"3a77a23940f7","platform":
+ "Linux","platform_version":"...
+RawContent : HTTP/1.1 200 OK
+ Content-Length: 755
+ Content-Type: application/json
+ Date: Sat, 31 Jan 2026 18:31:18 GMT
+ Server: uvicorn
+
+Forms : {}
+Headers : {[Content-Length, 755], [Content-Type, application/json], [Date, Sat, 31 Jan 2026 18:3
+ 1:18 GMT], [Server, uvicorn]} Images : {} InputFields : {} Links : {}
+ParsedHtml : mshtml.HTMLDocumentClass
+RawContentLength : 755
+```
+
+```bash
+curl http://localhost:8000/health
+
+
+StatusCode : 200
+StatusDescription : OK
+Content : {"status":"healthy","timestamp":"2026-01-31T18:31:26.924513+00:00","uptime_seconds":89
+ }
+RawContent : HTTP/1.1 200 OK
+ Content-Length: 87
+ Content-Type: application/json
+ Date: Sat, 31 Jan 2026 18:31:26 GMT
+ Server: uvicorn
+
+ {"status":"healthy","timestamp":"2026-01-31T18:31:26.924513+00:00","uptime_...
+Forms : {}
+Headers : {[Content-Length, 87], [Content-Type, application/json], [Date, Sat, 31 Jan 2026 18:31
+ :26 GMT], [Server, uvicorn]}
+Images : {}
+InputFields : {}
+Links : {}
+ParsedHtml : mshtml.HTMLDocumentClass
+RawContentLength : 87
+```
+
+## Terminal output showing successful push:
+```bash
+docker push newspec/python_app:1.0
+
+The push refers to repository [docker.io/newspec/python_app]
+8422fdf98022: Pushed
+56a7b3684a2c: Pushed
+410b7369101c: Pushed
+4e7298e95b69: Pushed
+b68196304589: Pushed
+343fbb74dfa7: Pushed
+cfdc6d123592: Pushed
+ff565e4de379: Pushed
+e50a58335e13: Pushed
+1.0: digest: sha256:9084f1513bc5af085a268ee9e8b165af82f7224e442da0790cf81f07b67ab10e size: 2203
+
+```
+
+## Docker Hub repository URL
+`https://hub.docker.com/repository/docker/newspec/python_app`
+
+## My tagging strategy
+`:1.0`(major/minor)
+
+# Technical analysis
+## Why does your Dockerfile work the way it does?
+- The image is based on a minimal Python runtime.
+- Dependencies are installed before application code to leverage Docker layer caching.
+- The app runs as a non-root user for better security.
+
+## What would happen if you changed the layer order?
+If the Dockerfile copied the entire project (`COPY . .`) before installing requirements, then any code change would invalidate the cache for the dependency install layer. That would force `pip install` to run again on every rebuild, making builds much slower.
+
+## What security considerations did you implement?
+- Non-root execution reduces privilege.
+- Smaller base image reduces attack surface.
+- `.dockerignore` prevents accidentally shipping local files (including potential secrets) into the image.
+
+## How does .dockerignore improve your build?
+- Reduces build context size (faster build, less IO).
+- Avoids copying local venvs and cache files into the image.
+- Prevents leaking IDE configs, git history, logs, and other irrelevant files.
+
+## Challenges & Solutions
+### 1. "I cannot open my application" after `docker run`
+**Fix**: Published the port with `-p 8000:8000`
+
+# What I Learned
+- Containers need explicit port publishing to be accessible from the host.
+- Layer ordering dramatically affects build speed due to caching.
+- Running as non-root is a simple but important security improvement.
+- .dockerignore is crucial to keep images clean and builds fast.
\ No newline at end of file
diff --git a/app_python/docs/LAB03.md b/app_python/docs/LAB03.md
new file mode 100644
index 0000000000..09c3e39c28
--- /dev/null
+++ b/app_python/docs/LAB03.md
@@ -0,0 +1,170 @@
+# LAB03 — Unit Testing + CI/CD + Security
+
+## 1. Overview
+
+### Testing framework used and why you chose it
+
+I chose **pytest** after comparing common Python testing frameworks (`unittest`, `pytest`, etc.).
+Pytest requires less boilerplate, has powerful fixtures and parametrization, produces clear failure
+output, and scales well with plugins and CI workflows.
+
+### What endpoints/functionality your tests cover
+
+All tests are located in `app_python/tests/` and use FastAPI's `TestClient`. The test suite covers:
+
+- **GET /**
+ Verifies status code `200`, JSON structure, required top-level
+ sections (`service`, `system`, `runtime`, `request`, `endpoints`), and important nested
+ fields/types.
+- **GET /health**
+ Verifies status code `200`, required fields (`status`, `timestamp`, `uptime_seconds`), and basic
+ format checks.
+- **Error cases**
+ - **404 Not Found** returns the custom
+ JSON `{ "error": "Not Found", "message": "Endpoint does not exist" }`
+ - **Non-404 HTTPException** returns `{ "error": "HTTP Error", "message": "<exception detail>" }`
+ - **500 Internal Server Error**
+ returns `{ "error": "Internal Server Error", "message": "An unexpected error occurred" }`
+
+### CI workflow trigger configuration (when does it run?)
+
+The GitHub Actions workflow runs on **push** and **pull requests** to `lab03` and `master`, but only
+when changes affect:
+
+- `app_python/**`
+- `.github/workflows/python-ci.yml`
+
+This avoids unnecessary CI runs for unrelated edits. Docker images are built and pushed only on
+**push** events (not on pull requests).
+
+### Versioning strategy chosen and rationale
+
+I use **CalVer (Calendar Versioning)** with format `YYYY.MM.DD` because this project is updated
+frequently and doesn't require manual git release tags. Date-based versions are simple,
+human-readable, and work well for continuous delivery.
+
+---
+
+## 2. Workflow Evidence
+
+### ✅ Successful workflow run (GitHub Actions link)
+
+- https://github.com/newspec/DevOps-Core-Course/actions/runs/21822195126
+
+### ✅ Tests passing locally (terminal output)
+
+```bash
+pytest
+========================================== test session starts ===========================================
+platform win32 -- Python 3.12.4, pytest-8.4.2, pluggy-1.6.0
+rootdir: C:\Users\malov\PycharmProjects\DevOps-Core-Course
+plugins: anyio-4.11.0, asyncio-1.3.0, cov-7.0.0
+asyncio: mode=Mode.STRICT, debug=False, asyncio_default_fixture_loop_scope=None, asyncio_default_test_loop_scope=function
+collected 7 items
+
+app_python\tests\test_errors.py ... [ 42%]
+app_python\tests\test_health.py .. [ 71%]
+app_python\tests\test_root.py .. [100%]
+
+=========================================== 7 passed in 0.48s ============================================
+```
+
+### ✅ Docker image on Docker Hub (link to your image)
+
+- https://hub.docker.com/repository/docker/newspec/python_app/general
+
+### ✅ Status badge working in README
+
+Check the top page of README.md
+
+## 3. Best Practices Implemented
+
+- **Fail Fast**: If a step fails, the job stops immediately, saving CI time and making failures
+ obvious.
+
+- **Job Dependencies**: Docker push depends on successful `test` and `security` jobs, preventing
+ publishing broken/insecure builds.
+
+- **Dependency Caching (pip)**: `setup-python` caches pip downloads so installs are faster on
+ repeated runs.
+
+- **Docker Layer Caching**: Buildx + GHA cache reuses Docker layers across runs, reducing Docker
+ build time significantly.
+
+- **Secrets Management**: Tokens (Docker Hub + Snyk) are stored in GitHub Secrets and never
+ committed.
+
+### Caching: time saved (before vs after)
+
+Measured by comparing two workflow runs:
+
+- **Cold run (cache miss)**: 80s total
+ - tests: 14s
+ - security: 29s
+ - docker build: 41s
+
+- **Warm run (cache hit)**: 64s total
+ - tests: 11s
+ - security: 22s
+ - docker build: 30s
+
+**Improvement**:
+
+- Total time saved: **16s**
+- Docker build time saved: **11s**
+- Percent improvement: **~20%**
+
+Evidence (screenshots):
+
+- First run (no cache): 
+- Second run (cache hit): 
+
+### Snyk: vulnerabilities found? action taken
+
+Snyk is executed in a separate `security` job using:
+
+```bash
+snyk test --severity-threshold=high
+```
+
+- If `high` (or above) vulnerabilities are found, the security job fails.
+- Because Docker depends on `security`, the image will **not be pushed** until vulnerabilities are
+ fixed or the threshold is adjusted.
+
+**Snyk result**: 0 high/critical vulnerabilities (build passed)
+
+## Key Decisions
+
+### Versioning Strategy: SemVer or CalVer? Why did you choose it for your app?
+
+I chose **CalVer** (`YYYY.MM.DD`) because the application is built frequently and does not follow
+formal release cycles. Date-based versioning is easy to automate in CI and provides clear
+information about when an image was built.
+
+### Docker Tags: What tags does your CI create? (e.g., latest, version number, etc.)
+
+The CI publishes the Docker image with these tags:
+
+- `YYYY.MM.DD` (CalVer date tag) β e.g., `2026.02.09`
+- `${{ github.sha }}` (commit SHA tag) β uniquely identifies the build source commit
+- `latest` β points to the most recent image build from the `lab03` or `master` branch
+
+### Workflow Triggers: Why did you choose those triggers?
+
+The workflow runs on push and pull request to `lab03` and `master` and only when relevant files
+change. This avoids running CI for unrelated edits. Docker images are built and pushed only on push
+events (not on pull requests) for `lab03` and `master`.
+
+### Test Coverage: What's tested vs not tested?
+
+**Tested**:
+
+- Successful responses for `GET /` and `GET /health`
+- Response JSON structure and required fields
+- Custom error handling for 404, non-404 `HTTPException`, and 500 errors
+
+**Not tested**:
+
+- Performance/load behavior
+- External integrations (none in this app)
+- Detailed validation of dynamic fields beyond basic format/type checks (e.g., exact timestamps)
\ No newline at end of file
diff --git a/app_python/docs/screenshots/01-main-endpoint.png b/app_python/docs/screenshots/01-main-endpoint.png
new file mode 100644
index 0000000000..2b5ad4b17f
Binary files /dev/null and b/app_python/docs/screenshots/01-main-endpoint.png differ
diff --git a/app_python/docs/screenshots/02-health-check.png b/app_python/docs/screenshots/02-health-check.png
new file mode 100644
index 0000000000..321da88450
Binary files /dev/null and b/app_python/docs/screenshots/02-health-check.png differ
diff --git a/app_python/docs/screenshots/03-formatted-output.png b/app_python/docs/screenshots/03-formatted-output.png
new file mode 100644
index 0000000000..bd76d10670
Binary files /dev/null and b/app_python/docs/screenshots/03-formatted-output.png differ
diff --git a/app_python/docs/screenshots/cache_hit.png b/app_python/docs/screenshots/cache_hit.png
new file mode 100644
index 0000000000..cf23e1e1a9
Binary files /dev/null and b/app_python/docs/screenshots/cache_hit.png differ
diff --git a/app_python/docs/screenshots/cache_miss.png b/app_python/docs/screenshots/cache_miss.png
new file mode 100644
index 0000000000..a03f43a091
Binary files /dev/null and b/app_python/docs/screenshots/cache_miss.png differ
diff --git a/app_python/requirements.txt b/app_python/requirements.txt
new file mode 100644
index 0000000000..c6245af3cf
--- /dev/null
+++ b/app_python/requirements.txt
@@ -0,0 +1,6 @@
+fastapi==0.122.0
+uvicorn[standard]==0.38.0
+pytest==8.4.2
+flake8==7.3.0
+httpx==0.28.1
+pytest-cov==7.0.0
diff --git a/app_python/tests/__init__.py b/app_python/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/app_python/tests/conftest.py b/app_python/tests/conftest.py
new file mode 100644
index 0000000000..d1bf72913b
--- /dev/null
+++ b/app_python/tests/conftest.py
@@ -0,0 +1,14 @@
+import pytest
+from fastapi.testclient import TestClient
+
+from app import app
+
+
+@pytest.fixture()
+def client():
+ return TestClient(app)
+
+
+@pytest.fixture()
+def client_no_raise():
+ return TestClient(app, raise_server_exceptions=False)
diff --git a/app_python/tests/test_errors.py b/app_python/tests/test_errors.py
new file mode 100644
index 0000000000..076f70dff6
--- /dev/null
+++ b/app_python/tests/test_errors.py
@@ -0,0 +1,52 @@
+from fastapi import HTTPException
+
+from app import app
+
+
+def test_500_returns_custom_json(client_no_raise):
+ @app.get("/__test_crash__")
+ async def __test_crash__():
+ raise RuntimeError("boom")
+
+ try:
+ r = client_no_raise.get("/__test_crash__")
+ assert r.status_code == 500
+ assert r.json() == {
+ "error": "Internal Server Error",
+ "message": "An unexpected error occurred",
+ }
+ finally:
+ app.router.routes = [
+ route for route in app.router.routes
+ if getattr(route, "path", None) != "/__test_crash__"
+ ]
+
+
+def test_http_exception_handler_404_custom_json(client):
+ r = client.get("/this-endpoint-does-not-exist")
+
+ assert r.status_code == 404
+ assert r.json() == {
+ "error": "Not Found",
+ "message": "Endpoint does not exist",
+ }
+
+
+def test_http_exception_handler_non_404_returns_http_error_json(client):
+ @app.get("/__test_http_418__")
+ async def __test_http_418__():
+ raise HTTPException(status_code=418, detail="I'm a teapot")
+
+ try:
+ r = client.get("/__test_http_418__")
+
+ assert r.status_code == 418
+ assert r.json() == {
+ "error": "HTTP Error",
+ "message": "I'm a teapot",
+ }
+ finally:
+ app.router.routes = [
+ route for route in app.router.routes
+ if getattr(route, "path", None) != "/__test_http_418__"
+ ]
diff --git a/app_python/tests/test_health.py b/app_python/tests/test_health.py
new file mode 100644
index 0000000000..1d2b4f9dcd
--- /dev/null
+++ b/app_python/tests/test_health.py
@@ -0,0 +1,18 @@
+def test_health_status_code(client):
+ r = client.get("/health")
+ assert r.status_code == 200
+
+
+def test_health_response_structure(client):
+ r = client.get("/health")
+ data = r.json()
+
+ for key in ["status", "timestamp", "uptime_seconds"]:
+ assert key in data, f"Missing health field: {key}"
+
+ assert data["status"] == "healthy"
+ assert isinstance(data["uptime_seconds"], int)
+ assert data["uptime_seconds"] >= 0
+
+ assert isinstance(data["timestamp"], str)
+ assert "T" in data["timestamp"]
diff --git a/app_python/tests/test_root.py b/app_python/tests/test_root.py
new file mode 100644
index 0000000000..9ca61460fa
--- /dev/null
+++ b/app_python/tests/test_root.py
@@ -0,0 +1,70 @@
+def test_root_status_code(client):
+ r = client.get("/")
+ assert r.status_code == 200
+
+
+def test_root_json_structure_and_required_fields(client):
+ r = client.get("/")
+ data = r.json()
+
+ for key in ["service", "system", "runtime", "request", "endpoints"]:
+ assert key in data, f"Missing top-level field: {key}"
+
+ service = data["service"]
+ for key in ["name", "version", "description", "framework"]:
+ assert key in service, f"Missing service field: {key}"
+ assert service["name"] == "devops-info-service"
+ assert service["framework"] == "FastAPI"
+
+ system = data["system"]
+ for key in [
+ "hostname",
+ "platform",
+ "platform_version",
+ "architecture",
+ "cpu_count",
+ "python_version",
+ ]:
+ assert key in system, f"Missing system field: {key}"
+
+ assert isinstance(system["hostname"], str) and system["hostname"]
+ assert isinstance(system["platform"], str) and system["platform"]
+ assert isinstance(system["architecture"], str) and system["architecture"]
+ assert ((system["cpu_count"] is None) or
+ isinstance(system["cpu_count"], int))
+
+ runtime = data["runtime"]
+ for key in ["uptime_seconds", "uptime_human", "current_time", "timezone"]:
+ assert key in runtime, f"Missing runtime field: {key}"
+
+ assert isinstance(runtime["uptime_seconds"], int)
+ assert runtime["uptime_seconds"] >= 0
+ assert isinstance(runtime["uptime_human"], str) and runtime["uptime_human"]
+ assert runtime["timezone"] == "UTC"
+
+ assert isinstance(runtime["current_time"], str)
+ assert "T" in runtime["current_time"]
+
+ req = data["request"]
+ for key in ["client_ip", "user_agent", "method", "path"]:
+ assert key in req, f"Missing request field: {key}"
+
+ assert req["method"] == "GET"
+ assert req["path"] == "/"
+ assert isinstance(req["client_ip"], str) and req["client_ip"]
+ assert ("user_agent" in req)
+
+ endpoints = data["endpoints"]
+ assert isinstance(endpoints, list)
+ assert len(endpoints) >= 2
+
+ paths = {(e.get("path"), e.get("method")) for e in endpoints}
+ assert ("/", "GET") in paths
+ assert ("/health", "GET") in paths
+
+ for e in endpoints:
+ for key in ["path", "method", "description"]:
+ assert key in e
+ assert isinstance(e["path"], str) and e["path"].startswith("/")
+ assert e["method"] in {"GET", "POST", "PUT", "DELETE", "PATCH"}
+ assert isinstance(e["description"], str) and e["description"]
diff --git a/docs/LAB04.md b/docs/LAB04.md
new file mode 100644
index 0000000000..b17667b00d
--- /dev/null
+++ b/docs/LAB04.md
@@ -0,0 +1,1192 @@
+# Lab 04 - Infrastructure as Code (Terraform & Pulumi)
+
+## 1. Cloud Provider & Infrastructure
+
+### Cloud Provider Choice: Yandex Cloud
+
+**Rationale:**
+- **Accessibility in Russia**: No restrictions or sanctions affecting access
+- **Free Tier**: 1 VM with 20% vCPU, 1 GB RAM, 10 GB storage
+- **No Credit Card Required**: Can start without payment method
+- **Good Documentation**: Available in Russian and English
+- **Terraform & Pulumi Support**: Official providers available
+- **Local Data Centers**: Lower latency for Russian users
+
+**Alternatives Considered:**
+- AWS: More popular globally but requires credit card and may have access issues
+- GCP: Good free tier but complex setup
+- VK Cloud: Russian alternative but less mature tooling support
+
+### Instance Configuration
+
+**VM Specifications:**
+- **Platform**: standard-v2
+- **CPU**: 2 cores @ 20% (free tier)
+- **Memory**: 1 GB RAM
+- **Disk**: 10 GB HDD (network-hdd)
+- **OS**: Ubuntu 24.04 LTS
+- **Region/Zone**: ru-central1-a
+
+**Network Configuration:**
+- **VPC Network**: Custom network (10.128.0.0/24)
+- **Public IP**: Yes (NAT enabled)
+- **Security Group Rules**:
+ - SSH (port 22): Allow from anywhere (0.0.0.0/0)
+ - HTTP (port 80): Allow from anywhere
+ - Custom (port 5000): Allow from anywhere (for app deployment)
+ - Egress: Allow all outbound traffic
+
+### Cost Analysis
+
+**Total Cost: $0.00/month**
+
+Using Yandex Cloud free tier:
+- VM: Free (20% vCPU, 1 GB RAM within limits)
+- Storage: Free (10 GB HDD within limits)
+- Network: Free (within egress limits)
+- Public IP: Free (1 static IP included)
+
+### Resources Created
+
+**Terraform Resources:**
+1. `yandex_vpc_network.lab04_network` - VPC network
+2. `yandex_vpc_subnet.lab04_subnet` - Subnet (10.128.0.0/24)
+3. `yandex_vpc_security_group.lab04_sg` - Security group with firewall rules
+4. `yandex_compute_instance.lab04_vm` - VM instance
+
+**Pulumi Resources:**
+1. `lab04-network` - VPC network
+2. `lab04-subnet` - Subnet (10.128.0.0/24)
+3. `lab04-sg` - Security group with firewall rules
+4. `lab04-vm` - VM instance
+
+---
+
+## 2. Terraform Implementation
+
+### Terraform Version
+
+```bash
+Terraform v1.9.0
+on darwin_arm64
++ provider registry.terraform.io/integrations/github v5.45.0
++ provider registry.terraform.io/yandex-cloud/yandex v0.187.0
+```
+
+### Project Structure
+
+```
+terraform/
+├── .terraform.lock.hcl # Provider version lock file
+├── .terraformrc # Terraform CLI configuration (Yandex mirror)
+├── .tflint.hcl # TFLint configuration
+├── main.tf # Main resources (VM, network, security)
+├── variables.tf # Input variable declarations
+├── outputs.tf # Output value definitions
+├── github.tf # GitHub provider (bonus task)
+├── terraform.tfvars # Actual configuration (gitignored)
+├── terraform.tfstate # State file (gitignored)
+└── terraform.tfstate.backup # State backup (gitignored)
+```
+
+**Note:** `terraform.tfstate` files are present locally but excluded from Git via `.gitignore`.
+
+### Key Configuration Decisions
+
+**1. Provider Configuration**
+- Used Yandex Cloud provider version ~> 0.187
+- Authentication via Service Account key (authorized key JSON file)
+- Configured default zone (ru-central1-a) and folder_id
+- GitHub provider version ~> 5.45 for repository management
+
+**2. Resource Organization**
+- Separated resources logically in main.tf
+- Used data source for Ubuntu image (latest 24.04 LTS)
+- Created dedicated VPC network instead of using default
+
+**3. Security Approach**
+- Security group with explicit ingress/egress rules
+- SSH key injection via metadata
+- All sensitive values in gitignored terraform.tfvars
+- Used variables for all configurable parameters
+
+**4. Free Tier Optimization**
+- Set `core_fraction = 20` for free tier CPU
+- Used `network-hdd` disk type (cheaper than SSD)
+- Minimal 10 GB disk size
+- Single VM instance
+
+### Challenges Encountered
+
+**1. Authentication Setup**
+- **Issue**: Initial confusion about OAuth token vs service account
+- **Solution**: Created Service Account with appropriate roles, generated authorized key (JSON)
+- **Learning**: Service accounts provide better security and are recommended for automation
+
+**2. Image Selection**
+- **Issue**: Needed to find correct Ubuntu 24.04 image family name
+- **Solution**: Used data source with `family = "ubuntu-2404-lts"`
+- **Learning**: Data sources are powerful for dynamic resource lookup
+
+**3. Free Tier Configuration**
+- **Issue**: Ensuring configuration stays within free tier limits
+- **Solution**: Set `core_fraction = 20`, used network-hdd, 10 GB disk
+- **Learning**: Important to understand cloud provider pricing models
+
+### Terraform Commands Output
+
+#### terraform init
+
+```bash
+$ cd terraform/
+terraform init
+
+Initializing the backend...
+
+Initializing provider plugins...
+- Finding yandex-cloud/yandex versions matching "~> 0.100"...
+- Finding integrations/github versions matching "~> 5.0"...
+- Installing yandex-cloud/yandex v0.187.0...
+- Installed yandex-cloud/yandex v0.187.0 (unauthenticated)
+- Installing integrations/github v5.45.0...
+- Installed integrations/github v5.45.0 (unauthenticated)
+
+Terraform has created a lock file .terraform.lock.hcl to record the provider
+selections it made above. Include this file in your version control repository
+so that Terraform can guarantee to make the same selections by default when
+you run "terraform init" in the future.
+
+╷
+│ Warning: Incomplete lock file information for providers
+│
+│ Due to your customized provider installation methods, Terraform was forced to calculate lock file checksums
+│ locally for the following providers:
+│   - integrations/github
+│   - yandex-cloud/yandex
+│
+│ The current .terraform.lock.hcl file only includes checksums for darwin_arm64, so Terraform running on another
+│ platform will fail to install these providers.
+│
+│ To calculate additional checksums for another platform, run:
+│   terraform providers lock -platform=linux_amd64
+│   (where linux_amd64 is the platform to generate)
+╵
+
+Terraform has been successfully initialized!
+
+You may now begin working with Terraform. Try running "terraform plan" to see
+any changes that are required for your infrastructure. All Terraform commands
+should now work.
+
+If you ever set or change modules or backend configuration for Terraform,
+rerun this command to reinitialize your working directory. If you forget, other
+commands will detect it and remind you to do so if necessary.
+```
+
+#### terraform plan
+
+```bash
+terraform plan
+var.cloud_id
+ Yandex Cloud ID
+
+ Enter a value: ********
+
+data.yandex_compute_image.ubuntu: Reading...
+data.yandex_compute_image.ubuntu: Read complete after 0s [id=********]
+
+Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ + create
+
+Terraform will perform the following actions:
+
+ # github_branch_protection.master_protection will be created
+ + resource "github_branch_protection" "master_protection" {
+ + allows_deletions = false
+ + allows_force_pushes = false
+ + blocks_creations = false
+ + enforce_admins = false
+ + id = (known after apply)
+ + lock_branch = false
+ + pattern = "master"
+ + repository_id = (known after apply)
+ + require_conversation_resolution = false
+ + require_signed_commits = false
+ + required_linear_history = false
+
+ + required_pull_request_reviews {
+ + dismiss_stale_reviews = true
+ + require_code_owner_reviews = false
+ + require_last_push_approval = false
+ + required_approving_review_count = 0
+ }
+ }
+
+ # github_repository.devops_course will be created
+ + resource "github_repository" "devops_course" {
+ + allow_auto_merge = false
+ + allow_merge_commit = true
+ + allow_rebase_merge = true
+ + allow_squash_merge = true
+ + archived = false
+ + default_branch = (known after apply)
+ + delete_branch_on_merge = true
+ + description = "DevOps Engineering: Core Practices - Lab assignments and projects"
+ + etag = (known after apply)
+ + full_name = (known after apply)
+ + git_clone_url = (known after apply)
+ + has_downloads = true
+ + has_issues = true
+ + has_projects = false
+ + has_wiki = false
+ + html_url = (known after apply)
+ + http_clone_url = (known after apply)
+ + id = (known after apply)
+ + merge_commit_message = "PR_TITLE"
+ + merge_commit_title = "MERGE_MESSAGE"
+ + name = "DevOps-Core-Course"
+ + node_id = (known after apply)
+ + primary_language = (known after apply)
+ + private = (known after apply)
+ + repo_id = (known after apply)
+ + squash_merge_commit_message = "COMMIT_MESSAGES"
+ + squash_merge_commit_title = "COMMIT_OR_PR_TITLE"
+ + ssh_clone_url = (known after apply)
+ + svn_url = (known after apply)
+ + topics = [
+ + "ansible",
+ + "ci-cd",
+ + "devops",
+ + "docker",
+ + "infrastructure-as-code",
+ + "kubernetes",
+ + "pulumi",
+ + "terraform",
+ ]
+ + visibility = "public"
+ + web_commit_signoff_required = false
+ }
+
+ # yandex_compute_instance.lab04_vm will be created
+ + resource "yandex_compute_instance" "lab04_vm" {
+ + created_at = (known after apply)
+ + folder_id = (known after apply)
+ + fqdn = (known after apply)
+ + gpu_cluster_id = (known after apply)
+ + hardware_generation = (known after apply)
+ + hostname = "lab04-vm"
+ + id = (known after apply)
+ + labels = {
+ + "environment" = "lab04"
+ + "managed_by" = "terraform"
+ + "purpose" = "devops-course"
+ }
+ + maintenance_grace_period = (known after apply)
+ + maintenance_policy = (known after apply)
+ + metadata = {
+ + "ssh-keys" = <<-EOT
+ ************************
+ EOT
+ }
+ + name = "lab04-vm"
+ + network_acceleration_type = "standard"
+ + platform_id = "standard-v2"
+ + status = (known after apply)
+ + zone = "ru-central1-a"
+
+ + boot_disk {
+ + auto_delete = true
+ + device_name = (known after apply)
+ + disk_id = (known after apply)
+ + mode = (known after apply)
+
+ + initialize_params {
+ + block_size = (known after apply)
+ + description = (known after apply)
+ + image_id = "fd8lt661chfo5i13a40d"
+ + name = (known after apply)
+ + size = 10
+ + snapshot_id = (known after apply)
+ + type = "network-hdd"
+ }
+ }
+
+ + network_interface {
+ + index = (known after apply)
+ + ip_address = (known after apply)
+ + ipv4 = true
+ + ipv6 = (known after apply)
+ + ipv6_address = (known after apply)
+ + mac_address = (known after apply)
+ + nat = true
+ + nat_ip_address = (known after apply)
+ + nat_ip_version = (known after apply)
+ + subnet_id = (known after apply)
+ }
+
+ + resources {
+ + core_fraction = 20
+ + cores = 2
+ + memory = 1
+ }
+
+ + scheduling_policy {
+ + preemptible = false
+ }
+ }
+
+ # yandex_vpc_network.lab04_network will be created
+ + resource "yandex_vpc_network" "lab04_network" {
+ + created_at = (known after apply)
+ + default_security_group_id = (known after apply)
+ + description = "Network for Lab 04 VM"
+ + folder_id = (known after apply)
+ + id = (known after apply)
+ + labels = (known after apply)
+ + name = "lab04-network"
+ + subnet_ids = (known after apply)
+ }
+
+ # yandex_vpc_subnet.lab04_subnet will be created
+ + resource "yandex_vpc_subnet" "lab04_subnet" {
+ + created_at = (known after apply)
+ + description = "Subnet for Lab 04 VM"
+ + folder_id = (known after apply)
+ + id = (known after apply)
+ + labels = (known after apply)
+ + name = "lab04-subnet"
+ + network_id = (known after apply)
+ + v4_cidr_blocks = [
+ + "10.128.0.0/24",
+ ]
+ + v6_cidr_blocks = (known after apply)
+ + zone = "ru-central1-a"
+ }
+
+Plan: 5 to add, 0 to change, 0 to destroy.
+
+Changes to Outputs:
+ + connection_info = {
+ + private_ip = (known after apply)
+ + public_ip = (known after apply)
+ + ssh_command = (known after apply)
+ + ssh_user = "ubuntu"
+ }
+ + network_id = (known after apply)
+ + ssh_command = (known after apply)
+ + subnet_id = (known after apply)
+ + vm_id = (known after apply)
+ + vm_name = "lab04-vm"
+ + vm_private_ip = (known after apply)
+ + vm_public_ip = (known after apply)
+
+──────────────────────────────────────────────────────────────────────────────────────────
+
+Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now.
+```
+
+#### terraform apply
+
+```bash
+terraform apply
+var.cloud_id
+ Yandex Cloud ID
+
+ Enter a value: ******
+
+github_repository.devops_course: Refreshing state... [id=DevOps-Core-Course]
+data.yandex_compute_image.ubuntu: Reading...
+yandex_vpc_network.lab04_network: Refreshing state... [id=******]
+data.yandex_compute_image.ubuntu: Read complete after 0s [id=******]
+yandex_vpc_subnet.lab04_subnet: Refreshing state... [id=******]
+yandex_compute_instance.lab04_vm: Refreshing state... [id=******]
+github_branch_protection.master_protection: Refreshing state... [id=B******]
+
+Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with
+the following symbols:
+-/+ destroy and then create replacement
+
+Terraform will perform the following actions:
+
+ # github_branch_protection.master_protection is tainted, so must be replaced
+-/+ resource "github_branch_protection" "master_protection" {
+ - force_push_bypassers = [] -> null
+ ~ id = "******" -> (known after apply)
+ - push_restrictions = [] -> null
+ # (10 unchanged attributes hidden)
+
+ ~ required_pull_request_reviews {
+ - dismissal_restrictions = [] -> null
+ - pull_request_bypassers = [] -> null
+ - restrict_dismissals = false -> null
+ # (4 unchanged attributes hidden)
+ }
+ }
+
+Plan: 1 to add, 0 to change, 1 to destroy.
+
+Do you want to perform these actions?
+ Terraform will perform the actions described above.
+ Only 'yes' will be accepted to approve.
+
+ Enter a value: yes
+
+github_branch_protection.master_protection: Destroying... [id=******]
+github_branch_protection.master_protection: Destruction complete after 0s
+github_branch_protection.master_protection: Creating...
+github_branch_protection.master_protection: Creation complete after 4s [id=******]
+
+Apply complete! Resources: 1 added, 0 changed, 1 destroyed.
+
+Outputs:
+
+connection_info = {
+ "private_ip" = "10.128.0.11"
+ "public_ip" = "84.201.128.171"
+ "ssh_command" = "ssh ubuntu@84.201.128.171"
+ "ssh_user" = "ubuntu"
+}
+network_id = "enp5kqg9rma6c31bjsen"
+ssh_command = "ssh ubuntu@84.201.128.171"
+subnet_id = "e9bl6fnifjfbe7ufp7tl"
+vm_id = "fhmbajpub1spksjhkvct"
+vm_name = "lab04-vm"
+vm_private_ip = "10.128.0.11"
+vm_public_ip = "84.201.128.171"
+```
+
+### SSH Connection Verification
+
+```bash
+ssh -i ~/.ssh/yandex_cloud_key ubuntu@84.201.128.171
+Welcome to Ubuntu 24.04.4 LTS (GNU/Linux 6.8.0-100-generic x86_64)
+
+ * Documentation: https://help.ubuntu.com
+ * Management: https://landscape.canonical.com
+ * Support: https://ubuntu.com/pro
+
+ System information as of Mon Feb 16 23:12:16 UTC 2026
+
+ System load: 0.0 Processes: 96
+ Usage of /: 23.1% of 9.04GB Users logged in: 0
+ Memory usage: 17% IPv4 address for eth0: 10.128.0.11
+ Swap usage: 0%
+
+
+Expanded Security Maintenance for Applications is not enabled.
+
+0 updates can be applied immediately.
+
+Enable ESM Apps to receive additional future security updates.
+See https://ubuntu.com/esm or run: sudo pro status
+
+
+
+The programs included with the Ubuntu system are free software;
+the exact distribution terms for each program are described in the
+individual files in /usr/share/doc/*/copyright.
+
+Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by
+applicable law.
+
+To run a command as administrator (user "root"), use "sudo ".
+See "man sudo_root" for details.
+
+ubuntu@lab04-vm:~$
+```
+
+---
+
+## 3. Pulumi Implementation
+
+### Pulumi Version and Language
+
+```bash
+Pulumi v3.220.0
+Python 3.9.6
+pulumi-yandex v0.13.0
+```
+
+**Language Choice: Python**
+
+### Code Differences from Terraform
+
+**1. Language Paradigm**
+
+**Terraform (Declarative HCL):**
+```hcl
+resource "yandex_vpc_network" "lab04_network" {
+ name = "lab04-network"
+ description = "Network for Lab 04 VM"
+}
+```
+
+**Pulumi (Imperative Python):**
+```python
+network = yandex.VpcNetwork(
+ "lab04-network",
+ name="lab04-pulumi-network",
+ description="Network for Lab 04 Pulumi VM",
+ folder_id=folder_id
+)
+```
+
+**Key Differences:**
+- Terraform: Resource blocks with attributes
+- Pulumi: Object instantiation with constructor arguments
+- Terraform: Static configuration
+- Pulumi: Can use variables, loops, functions naturally
+
+**2. Configuration Management**
+
+**Terraform:**
+```hcl
+# variables.tf
+variable "folder_id" {
+ description = "Yandex Cloud folder ID"
+ type = string
+}
+
+# terraform.tfvars
+folder_id = "b1g..."
+```
+
+**Pulumi:**
+```python
+# __main__.py
+config = pulumi.Config()
+folder_id = config.require("folder_id")
+
+# Command line
+pulumi config set lab04-pulumi:folder_id b1g...
+```
+
+**Key Differences:**
+- Terraform: Separate variable files
+- Pulumi: Config object in code
+- Terraform: tfvars files
+- Pulumi: Stack-specific YAML files or CLI commands
+
+**3. Outputs**
+
+**Terraform:**
+```hcl
+output "vm_public_ip" {
+ description = "Public IP address of the VM"
+ value = yandex_compute_instance.lab04_vm.network_interface[0].nat_ip_address
+}
+```
+
+**Pulumi:**
+```python
+pulumi.export("vm_public_ip", vm.network_interfaces[0].nat_ip_address)
+
+# For computed values
+pulumi.export("ssh_command", vm.network_interfaces[0].nat_ip_address.apply(
+ lambda ip: f"ssh {ssh_user}@{ip}"
+))
+```
+
+**Key Differences:**
+- Terraform: Output blocks
+- Pulumi: Export function calls
+- Pulumi: `.apply()` for working with computed values (Promises)
+
+**4. Resource Dependencies**
+
+**Terraform:**
+```hcl
+# Implicit dependencies through references
+resource "yandex_vpc_subnet" "lab04_subnet" {
+ network_id = yandex_vpc_network.lab04_network.id # Implicit dependency
+}
+```
+
+**Pulumi:**
+```python
+# Same implicit dependencies through references
+subnet = yandex.VpcSubnet(
+ "lab04-subnet",
+ network_id=network.id # Implicit dependency
+)
+```
+
+**Key Differences:**
+- Both handle dependencies automatically
+- Pulumi can use explicit `depends_on` if needed
+- Pulumi's type system helps catch errors earlier
+
+### Advantages Discovered
+
+**1. Programming Language Features**
+
+ **Loops and Conditionals:**
+```python
+# Easy to create multiple similar resources
+for i in range(3):
+ subnet = yandex.VpcSubnet(f"subnet-{i}", ...)
+
+# Conditional resource creation
+if config.get_bool("enable_monitoring"):
+ monitoring = yandex.MonitoringDashboard(...)
+```
+
+ **Functions and Reusability:**
+```python
+def create_security_rule(port, description):
+ return yandex.VpcSecurityGroupIngressArgs(
+ protocol="TCP",
+ description=description,
+ v4_cidr_blocks=["0.0.0.0/0"],
+ port=port
+ )
+
+# Use function to create rules
+ingress=[
+ create_security_rule(22, "Allow SSH"),
+ create_security_rule(80, "Allow HTTP"),
+ create_security_rule(5000, "Allow app port 5000"),
+]
+```
+
+ **Error Handling:**
+```python
+try:
+ with open(ssh_public_key_path, "r") as f:
+ ssh_public_key = f.read().strip()
+except FileNotFoundError:
+ raise Exception(f"SSH public key not found at {ssh_public_key_path}")
+```
+
+**2. IDE Support**
+
+ **Autocomplete:**
+- IDE suggests available properties
+- Type hints show expected types
+- Inline documentation
+
+ **Type Checking:**
+- Catch errors before deployment
+- Better refactoring support
+- Clear error messages
+
+**3. Testing Capabilities**
+
+ **Unit Tests:**
+```python
+# Can write unit tests for infrastructure
+import unittest
+from pulumi import runtime
+
+class TestInfrastructure(unittest.TestCase):
+ @pulumi.runtime.test
+ def test_vm_has_correct_size(self):
+ # Test infrastructure code
+ pass
+```
+
+**4. Secrets Management**
+
+ **Encrypted by Default:**
+```bash
+pulumi config set --secret github_token ghp_...
+# Automatically encrypted in Pulumi.*.yaml
+```
+
+ **No Plain Text in State:**
+- Secrets encrypted in state file
+- Safer than Terraform's plain text state
+
+### Challenges Encountered
+
+**1. Learning Curve**
+- **Issue**: Understanding Pulumi's async/promise model (`.apply()`)
+- **Solution**: Read documentation on Output types and computed values
+- **Learning**: Pulumi's Output type handles async resource creation
+
+**2. Provider Documentation**
+- **Issue**: Yandex Cloud Pulumi provider has less documentation than Terraform
+- **Solution**: Referred to Terraform docs and translated to Pulumi syntax
+- **Learning**: Terraform has larger community and more examples
+
+**3. Python Path Issues**
+- **Issue**: SSH key path with `~` not expanding correctly
+- **Solution**: Added manual path expansion in code
+- **Learning**: Need to handle OS-specific path issues in code
+
+### Pulumi Commands Output
+
+#### pulumi preview
+
+```bash
+pulumi preview
+Enter your passphrase to unlock config/secrets
+ (set PULUMI_CONFIG_PASSPHRASE or PULUMI_CONFIG_PASSPHRASE_FILE to remember):
+Enter your passphrase to unlock config/secrets
+Previewing update (dev):
+ Type Name Plan
+ + pulumi:pulumi:Stack lab04-pulumi-dev create
+ + ├─ yandex:index:VpcNetwork lab04-network create
+ + ├─ yandex:index:VpcSubnet lab04-subnet create
+ + ├─ yandex:index:VpcSecurityGroup lab04-sg create
+ + └─ yandex:index:ComputeInstance lab04-vm create
+
+Outputs:
+ connection_info: {
+ private_ip : [unknown]
+ public_ip : [unknown]
+ ssh_command: [unknown]
+ ssh_user : "ubuntu"
+ }
+ network_id : [unknown]
+ ssh_command : [unknown]
+ subnet_id : [unknown]
+ vm_id : [unknown]
+ vm_name : "lab04-pulumi-vm"
+ vm_private_ip : [unknown]
+ vm_public_ip : [unknown]
+
+Resources:
+ + 5 to create
+
+(venv) newspec@10 pulumi %
+```
+
+#### pulumi up
+
+```bash
+pulumi up
+Enter your passphrase to unlock config/secrets
+ (set PULUMI_CONFIG_PASSPHRASE or PULUMI_CONFIG_PASSPHRASE_FILE to remember):
+Enter your passphrase to unlock config/secrets
+Previewing update (dev):
+ Type Name Plan
+ + pulumi:pulumi:Stack lab04-pulumi-dev create
+ + ├─ yandex:index:VpcNetwork lab04-network create
+ + ├─ yandex:index:VpcSubnet lab04-subnet create
+ + ├─ yandex:index:VpcSecurityGroup lab04-sg create
+ + └─ yandex:index:ComputeInstance lab04-vm create
+
+Outputs:
+ connection_info: {
+ private_ip : [unknown]
+ public_ip : [unknown]
+ ssh_command: [unknown]
+ ssh_user : "ubuntu"
+ }
+ network_id : [unknown]
+ ssh_command : [unknown]
+ subnet_id : [unknown]
+ vm_id : [unknown]
+ vm_name : "lab04-pulumi-vm"
+ vm_private_ip : [unknown]
+ vm_public_ip : [unknown]
+
+Resources:
+ + 5 to create
+
+Do you want to perform this update? yes
+Updating (dev):
+ Type Name Status
+ + pulumi:pulumi:Stack lab04-pulumi-dev created (41s)
+ + ├─ yandex:index:VpcNetwork lab04-network created (1s)
+ + ├─ yandex:index:VpcSecurityGroup lab04-sg created (1s)
+ + ├─ yandex:index:VpcSubnet lab04-subnet created (0.43s)
+ + └─ yandex:index:ComputeInstance lab04-vm created (38s)
+
+Outputs:
+ connection_info: {
+ private_ip : "10.128.0.13"
+ public_ip : "84.201.128.246"
+ ssh_command: "ssh ubuntu@84.201.128.246"
+ ssh_user : "ubuntu"
+ }
+ network_id : "enpej60jp6arufbqcu7g"
+ ssh_command : "ssh ubuntu@84.201.128.246"
+ subnet_id : "e9bdpptsdf2nafbj1s10"
+ vm_id : "fhmvjrq2012fqg0mloc8"
+ vm_name : "lab04-pulumi-vm"
+ vm_private_ip : "10.128.0.13"
+ vm_public_ip : "84.201.128.246"
+
+Resources:
+ + 5 created
+
+Duration: 42s
+```
+
+### SSH Connection Verification
+
+```bash
+ssh -i ~/.ssh/yandex_cloud_key ubuntu@84.201.128.246
+Welcome to Ubuntu 24.04.4 LTS (GNU/Linux 6.8.0-100-generic x86_64)
+
+ * Documentation: https://help.ubuntu.com
+ * Management: https://landscape.canonical.com
+ * Support: https://ubuntu.com/pro
+
+ System information as of Mon Feb 16 23:50:44 UTC 2026
+
+ System load: 0.01 Processes: 99
+ Usage of /: 23.1% of 9.04GB Users logged in: 0
+ Memory usage: 17% IPv4 address for eth0: 10.128.0.13
+ Swap usage: 0%
+
+
+Expanded Security Maintenance for Applications is not enabled.
+
+0 updates can be applied immediately.
+
+Enable ESM Apps to receive additional future security updates.
+See https://ubuntu.com/esm or run: sudo pro status
+
+
+
+The programs included with the Ubuntu system are free software;
+the exact distribution terms for each program are described in the
+individual files in /usr/share/doc/*/copyright.
+
+Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by
+applicable law.
+
+
+To run a command as administrator (user "root"), use "sudo <command>".
+See "man sudo_root" for details.
+
+ubuntu@lab04-pulumi-vm:~$
+```
+---
+
+## 4. Terraform vs Pulumi Comparison
+
+### Ease of Learning
+
+**Terraform: (4/5)**
+
+**Pros:**
+- Simple, declarative syntax
+- Easy to understand resource blocks
+- Extensive documentation and examples
+- Large community with many tutorials
+- Consistent patterns across providers
+
+**Cons:**
+- Need to learn HCL syntax
+- Limited logic capabilities
+- Some concepts (count, for_each) can be confusing
+
+**Pulumi: (3/5)**
+
+**Pros:**
+- Use familiar programming language
+- No new syntax to learn (if you know Python)
+- Natural use of variables and functions
+
+**Cons:**
+- Need to understand Output/Promise model
+- Async concepts can be confusing
+- Less community content and examples
+- Requires programming knowledge
+
+**Winner: Terraform** - Lower barrier to entry, especially for those without programming background.
+
+### Code Readability
+
+**Terraform: (5/5)**
+
+**Pros:**
+- Very clear and declarative
+- Easy to see what infrastructure will be created
+- Consistent structure across all resources
+- Self-documenting with descriptions
+
+**Pulumi: (4/5)**
+
+**Pros:**
+- Familiar Python syntax
+- Can add comments and documentation strings
+- Type hints improve clarity
+- IDE shows inline documentation
+
+**Cons:**
+- More verbose than HCL
+- Mixing infrastructure and logic can reduce clarity
+- Need to understand Python conventions
+
+**Winner: Terraform** - More concise and purpose-built for infrastructure.
+
+### Debugging
+
+**Terraform: (3/5)**
+
+**Pros:**
+- Clear error messages
+- `terraform plan` shows what will change
+- Can use `terraform console` for testing expressions
+- State file helps understand current state
+
+**Cons:**
+- Limited debugging tools
+- Hard to debug complex expressions
+- No step-through debugging
+- Error messages can be cryptic for complex scenarios
+
+**Pulumi: (4/5)**
+
+**Pros:**
+- Can use Python debugger (pdb)
+- IDE debugging support
+- Better error messages with stack traces
+- Can add print statements for debugging
+- Unit testing capabilities
+
+**Cons:**
+- Async nature can complicate debugging
+- Output types require `.apply()` understanding
+
+**Winner: Pulumi** - Full programming language debugging capabilities.
+
+### Documentation
+
+**Terraform: (5/5)**
+
+**Pros:**
+- Extensive official documentation
+- Large community with many examples
+- Provider documentation in Terraform Registry
+- Many tutorials and courses
+- Stack Overflow has many answers
+
+**Cons:**
+- Documentation can be overwhelming
+- Some providers have better docs than others
+
+**Pulumi: (3/5)**
+
+**Pros:**
+- Good official documentation
+- API reference auto-generated
+- Examples in multiple languages
+- Good getting started guides
+
+**Cons:**
+- Smaller community
+- Fewer third-party tutorials
+- Less Stack Overflow content
+- Provider docs sometimes less detailed
+
+**Winner: Terraform** - Much larger ecosystem and community.
+
+### Use Cases
+
+**When to Use Terraform:**
+
+ **Simple to Medium Infrastructure**
+- Straightforward resource provisioning
+- Standard cloud patterns
+- Team prefers declarative approach
+
+ **Multi-Cloud Deployments**
+- Largest provider ecosystem
+- Consistent syntax across clouds
+- Mature and stable
+
+ **Compliance and Governance**
+- Clear audit trail
+- Policy as code (Sentinel)
+- Established best practices
+
+ **Team Without Programming Background**
+- DevOps/Ops teams
+- Infrastructure-focused roles
+- Lower learning curve
+
+**When to Use Pulumi:**
+
+ **Complex Infrastructure Logic**
+- Dynamic resource creation
+- Complex conditionals
+- Advanced transformations
+
+ **Developer-Centric Teams**
+- Software engineers managing infrastructure
+- Want to use familiar languages
+- Need testing capabilities
+
+ **Reusable Components**
+- Building infrastructure libraries
+- Sharing code via packages
+- Higher-level abstractions
+
+ **Better Secrets Management**
+- Need encrypted secrets
+- Compliance requirements
+- Sensitive data handling
+
+## 5. Lab 5 Preparation & Cleanup
+
+### VM for Lab 5
+
+**Are you keeping your VM for Lab 5?** No
+
+**What will you use for Lab 5?** Will recreate cloud VM
+
+**Terraform destroy**:
+```bash
+terraform destroy
+var.cloud_id
+ Yandex Cloud ID
+
+ Enter a value: ********
+
+github_repository.devops_course: Refreshing state... [id=DevOps-Core-Course]
+data.yandex_compute_image.ubuntu: Reading...
+data.yandex_compute_image.ubuntu: Read complete after 0s [id=*******]
+
+Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
+ - destroy
+
+Terraform will perform the following actions:
+
+ # github_repository.devops_course will be destroyed
+ - resource "github_repository" "devops_course" {
+ - allow_auto_merge = false -> null
+ - allow_merge_commit = true -> null
+ - allow_rebase_merge = true -> null
+ - allow_squash_merge = true -> null
+ - allow_update_branch = false -> null
+ - archived = false -> null
+ - auto_init = false -> null
+ - default_branch = "master" -> null
+ - delete_branch_on_merge = true -> null
+ - description = "DevOps Engineering: Core Practices - Lab assignments and projects" -> null
+ - etag = "W/\"8f88878a50eedec268e373e039998430cbf194a2a9e0c3ff93a27116412b1b69\"" -> null
+ - full_name = "newspec/DevOps-Core-Course" -> null
+ - git_clone_url = "git://github.com/newspec/DevOps-Core-Course.git" -> null
+ - has_discussions = false -> null
+ - has_downloads = true -> null
+ - has_issues = true -> null
+ - has_projects = false -> null
+ - has_wiki = false -> null
+ - html_url = "https://github.com/newspec/DevOps-Core-Course" -> null
+ - http_clone_url = "https://github.com/newspec/DevOps-Core-Course.git" -> null
+ - id = "DevOps-Core-Course" -> null
+ - is_template = false -> null
+ - merge_commit_message = "PR_TITLE" -> null
+ - merge_commit_title = "MERGE_MESSAGE" -> null
+ - name = "DevOps-Core-Course" -> null
+ - node_id = "R_kgDORA7Qvw" -> null
+ - private = false -> null
+ - repo_id = 1141821631 -> null
+ - squash_merge_commit_message = "COMMIT_MESSAGES" -> null
+ - squash_merge_commit_title = "COMMIT_OR_PR_TITLE" -> null
+ - ssh_clone_url = "git@github.com:newspec/DevOps-Core-Course.git" -> null
+ - svn_url = "https://github.com/newspec/DevOps-Core-Course" -> null
+ - topics = [
+ - "ansible",
+ - "ci-cd",
+ - "devops",
+ - "docker",
+ - "infrastructure-as-code",
+ - "kubernetes",
+ - "pulumi",
+ - "terraform",
+ ] -> null
+ - visibility = "public" -> null
+ - vulnerability_alerts = false -> null
+ - web_commit_signoff_required = false -> null
+
+ - security_and_analysis {
+ - secret_scanning {
+ - status = "enabled" -> null
+ }
+ - secret_scanning_push_protection {
+ - status = "enabled" -> null
+ }
+ }
+ }
+
+Plan: 0 to add, 0 to change, 1 to destroy.
+
+Changes to Outputs:
+ - vm_name = "lab04-vm" -> null
+
+Do you really want to destroy all resources?
+ Terraform will destroy all your managed infrastructure, as shown above.
+ There is no undo. Only 'yes' will be accepted to confirm.
+
+ Enter a value: yes
+
+github_repository.devops_course: Destroying... [id=DevOps-Core-Course]
+╷
+│ Error: DELETE https://api.github.com/repos/newspec/DevOps-Core-Course: 403 Must have admin rights to Repository. []
+│
+│
+╵
+```
+**pulumi destroy**:
+```bash
+pulumi destroy
+Enter your passphrase to unlock config/secrets
+ (set PULUMI_CONFIG_PASSPHRASE or PULUMI_CONFIG_PASSPHRASE_FILE to remember):
+Enter your passphrase to unlock config/secrets
+Previewing destroy (dev):
+ Type Name Plan
+ - pulumi:pulumi:Stack lab04-pulumi-dev delete
+ - ├─ yandex:index:VpcNetwork lab04-network delete
+ - ├─ yandex:index:ComputeInstance lab04-vm delete
+ - ├─ yandex:index:VpcSubnet lab04-subnet delete
+ - └─ yandex:index:VpcSecurityGroup lab04-sg delete
+
+Outputs:
+ - connection_info: {
+ - private_ip : "10.128.0.13"
+ - public_ip : "84.201.128.246"
+ - ssh_command: "ssh ubuntu@84.201.128.246"
+ - ssh_user : "ubuntu"
+ }
+ - network_id : "enpej60jp6arufbqcu7g"
+ - ssh_command : "ssh ubuntu@84.201.128.246"
+ - subnet_id : "e9bdpptsdf2nafbj1s10"
+ - vm_id : "fhmvjrq2012fqg0mloc8"
+ - vm_name : "lab04-pulumi-vm"
+ - vm_private_ip : "10.128.0.13"
+ - vm_public_ip : "84.201.128.246"
+
+Resources:
+ - 5 to delete
+
+Do you want to perform this destroy? yes
+Destroying (dev):
+ Type Name Status
+ - pulumi:pulumi:Stack lab04-pulumi-dev deleted (0.01s)
+ - ├─ yandex:index:ComputeInstance lab04-vm deleted (33s)
+ - ├─ yandex:index:VpcSubnet lab04-subnet deleted (4s)
+ - ├─ yandex:index:VpcSecurityGroup lab04-sg deleted (0.43s)
+ - └─ yandex:index:VpcNetwork lab04-network deleted (1s)
+
+Outputs:
+ - connection_info: {
+ - private_ip : "10.128.0.13"
+ - public_ip : "84.201.128.246"
+ - ssh_command: "ssh ubuntu@84.201.128.246"
+ - ssh_user : "ubuntu"
+ }
+ - network_id : "enpej60jp6arufbqcu7g"
+ - ssh_command : "ssh ubuntu@84.201.128.246"
+ - subnet_id : "e9bdpptsdf2nafbj1s10"
+ - vm_id : "fhmvjrq2012fqg0mloc8"
+ - vm_name : "lab04-pulumi-vm"
+ - vm_private_ip : "10.128.0.13"
+ - vm_public_ip : "84.201.128.246"
+
+Resources:
+ - 5 deleted
+
+Duration: 40s
+
+The resources in the stack have been deleted, but the history and configuration associated with the stack are still maintained.
+If you want to remove the stack completely, run `pulumi stack rm dev`.
+```
+
+**screenshot showing resource status:** 
diff --git a/docs/image.png b/docs/image.png
new file mode 100644
index 0000000000..83ce4aa12f
Binary files /dev/null and b/docs/image.png differ
diff --git a/pulumi/Pulumi.yaml b/pulumi/Pulumi.yaml
new file mode 100644
index 0000000000..0e9b5069df
--- /dev/null
+++ b/pulumi/Pulumi.yaml
@@ -0,0 +1,3 @@
+name: lab04-pulumi
+runtime: python
+description: Lab 04 - Infrastructure as Code with Pulumi (Yandex Cloud)
\ No newline at end of file
diff --git a/pulumi/__main__.py b/pulumi/__main__.py
new file mode 100644
index 0000000000..e719228a87
--- /dev/null
+++ b/pulumi/__main__.py
@@ -0,0 +1,150 @@
+"""
+Lab 04 - Pulumi Infrastructure as Code
+Provisions a VM on Yandex Cloud with network and security configuration
+"""
+
+import os
+
+import pulumi
+import pulumi_yandex as yandex
+
+# Get configuration
+config = pulumi.Config()
+folder_id = config.require("folder_id")
+zone = config.get("zone") or "ru-central1-a"
+vm_name = config.get("vm_name") or "lab04-pulumi-vm"
+ssh_user = config.get("ssh_user") or "ubuntu"
+ssh_public_key_path = config.get("ssh_public_key_path") or "~/.ssh/id_rsa.pub"
+# CIDR allowed to SSH, e.g. 203.0.113.10/32
+ssh_allowed_cidr = config.require("ssh_allowed_cidr")
+
+# Read SSH public key
+ssh_key_expanded = ssh_public_key_path.replace(
+ "~", pulumi.runtime.get_config("HOME") or "~"
+)
+with open(ssh_key_expanded, "r") as f:
+ ssh_public_key = f.read().strip()
+
+# Get latest Ubuntu 24.04 image from the public image catalog.
+# NOTE(review): "standard-images" is passed as folder_id here — confirm the
+# pulumi-yandex provider resolves this alias the same way the Terraform
+# yandex_compute_image data source does.
+ubuntu_image = yandex.get_compute_image(
+ family="ubuntu-2404-lts",
+ folder_id="standard-images"
+)
+
+# Create VPC network (counterpart of yandex_vpc_network.lab04_network in
+# terraform/main.tf)
+network = yandex.VpcNetwork(
+ "lab04-network",
+ name="lab04-pulumi-network",
+ description="Network for Lab 04 Pulumi VM",
+ folder_id=folder_id
+)
+
+# Create subnet; referencing network.id creates an implicit dependency, so
+# Pulumi orders creation automatically. CIDR matches the Terraform stack
+# (10.128.0.0/24).
+subnet = yandex.VpcSubnet(
+ "lab04-subnet",
+ name="lab04-pulumi-subnet",
+ description="Subnet for Lab 04 Pulumi VM",
+ v4_cidr_blocks=["10.128.0.0/24"],
+ zone=zone,
+ network_id=network.id,
+ folder_id=folder_id
+)
+
+# Create security group. Rules mirror yandex_vpc_security_group.lab04_sg in
+# terraform/main.tf: SSH restricted to ssh_allowed_cidr, HTTP (80) and the
+# app port (5000) open to the world, all egress allowed.
+security_group = yandex.VpcSecurityGroup(
+ "lab04-sg",
+ name="lab04-pulumi-security-group",
+ description="Security group for Lab 04 Pulumi VM",
+ network_id=network.id,
+ folder_id=folder_id,
+ ingresses=[
+ # Allow SSH from specific IP
+ yandex.VpcSecurityGroupIngressArgs(
+ protocol="TCP",
+ description="Allow SSH from my IP",
+ v4_cidr_blocks=[ssh_allowed_cidr],
+ port=22
+ ),
+ # Allow HTTP
+ yandex.VpcSecurityGroupIngressArgs(
+ protocol="TCP",
+ description="Allow HTTP",
+ v4_cidr_blocks=["0.0.0.0/0"],
+ port=80
+ ),
+ # Allow custom port 5000
+ yandex.VpcSecurityGroupIngressArgs(
+ protocol="TCP",
+ description="Allow app port 5000",
+ v4_cidr_blocks=["0.0.0.0/0"],
+ port=5000
+ ),
+ ],
+ egresses=[
+ # Allow all outbound traffic (from_port/to_port 0-65535 spans every port)
+ yandex.VpcSecurityGroupEgressArgs(
+ protocol="ANY",
+ description="Allow all outbound traffic",
+ v4_cidr_blocks=["0.0.0.0/0"],
+ from_port=0,
+ to_port=65535
+ ),
+ ]
+)
+
+# Create VM instance (same sizing as terraform/main.tf: 2 cores at 20%
+# fraction, memory 1 — presumably GB, per provider convention; confirm).
+vm = yandex.ComputeInstance(
+ "lab04-vm",
+ name=vm_name,
+ hostname=vm_name,
+ platform_id="standard-v2",
+ zone=zone,
+ folder_id=folder_id,
+ resources=yandex.ComputeInstanceResourcesArgs(
+ cores=2,
+ memory=1,
+ core_fraction=20 # Free tier: 20% CPU
+ ),
+ boot_disk=yandex.ComputeInstanceBootDiskArgs(
+ initialize_params=yandex.ComputeInstanceBootDiskInitializeParamsArgs(
+ image_id=ubuntu_image.id,
+ size=10, # 10 GB
+ type="network-hdd"
+ )
+ ),
+ network_interfaces=[
+ yandex.ComputeInstanceNetworkInterfaceArgs(
+ subnet_id=subnet.id,
+ nat=True, # Assign public IP
+ security_group_ids=[security_group.id]
+ )
+ ],
+ # cloud-init style "user:key" entry consumed by the Yandex VM agent
+ metadata={
+ "ssh-keys": f"{ssh_user}:{ssh_public_key}"
+ },
+ labels={
+ "environment": "lab04",
+ "managed_by": "pulumi",
+ "purpose": "devops-course"
+ },
+ # Non-preemptible: the VM is not reclaimed by the platform
+ scheduling_policy=yandex.ComputeInstanceSchedulingPolicyArgs(
+ preemptible=False
+ )
+)
+
+# Export outputs. Resource attributes are Output values that resolve
+# asynchronously; .apply() defers string formatting until the IP is known.
+pulumi.export("vm_id", vm.id)
+pulumi.export("vm_name", vm.name)
+pulumi.export("vm_public_ip", vm.network_interfaces[0].nat_ip_address)
+pulumi.export("vm_private_ip", vm.network_interfaces[0].ip_address)
+pulumi.export("network_id", network.id)
+pulumi.export("subnet_id", subnet.id)
+pulumi.export("ssh_command", vm.network_interfaces[0].nat_ip_address.apply(
+ lambda ip: f"ssh {ssh_user}@{ip}"
+))
+# Convenience bundle; duplicates the individual exports above.
+pulumi.export("connection_info", {
+ "public_ip": vm.network_interfaces[0].nat_ip_address,
+ "private_ip": vm.network_interfaces[0].ip_address,
+ "ssh_user": ssh_user,
+ "ssh_command": vm.network_interfaces[0].nat_ip_address.apply(
+ lambda ip: f"ssh {ssh_user}@{ip}"
+ )
+})
diff --git a/pulumi/requirements.txt b/pulumi/requirements.txt
new file mode 100644
index 0000000000..2356228903
--- /dev/null
+++ b/pulumi/requirements.txt
@@ -0,0 +1,2 @@
+pulumi>=3.0.0,<4.0.0
+pulumi-yandex>=0.13.0
\ No newline at end of file
diff --git a/terraform/.terraformrc b/terraform/.terraformrc
new file mode 100644
index 0000000000..9bc7728211
--- /dev/null
+++ b/terraform/.terraformrc
@@ -0,0 +1,9 @@
+provider_installation {
+ network_mirror {
+ url = "https://terraform-mirror.yandexcloud.net/"
+ include = ["registry.terraform.io/*/*"]
+ }
+ direct {
+ exclude = ["registry.terraform.io/*/*"]
+ }
+}
\ No newline at end of file
diff --git a/terraform/.tflint.hcl b/terraform/.tflint.hcl
new file mode 100644
index 0000000000..96e00d361b
--- /dev/null
+++ b/terraform/.tflint.hcl
@@ -0,0 +1,24 @@
+plugin "terraform" {
+ enabled = true
+ preset = "recommended"
+}
+
+rule "terraform_naming_convention" {
+ enabled = true
+}
+
+rule "terraform_documented_variables" {
+ enabled = true
+}
+
+rule "terraform_documented_outputs" {
+ enabled = true
+}
+
+rule "terraform_unused_declarations" {
+ enabled = true
+}
+
+rule "terraform_deprecated_index" {
+ enabled = true
+}
\ No newline at end of file
diff --git a/terraform/github.tf b/terraform/github.tf
new file mode 100644
index 0000000000..39b5a40c02
--- /dev/null
+++ b/terraform/github.tf
@@ -0,0 +1,52 @@
+# GitHub Provider Configuration for Repository Management
+# This file demonstrates importing existing infrastructure into Terraform
+# Note: required_providers for github is defined in main.tf
+
+provider "github" {
+ token = var.github_token
+ owner = var.github_owner
+}
+
+# Import existing DevOps-Core-Course repository.
+# NOTE(review): `terraform destroy` failed on this resource with
+# "403 Must have admin rights to Repository" (see report output) — the
+# personal access token needs admin rights on the repo for delete, or the
+# resource should be removed from state instead of destroyed.
+resource "github_repository" "devops_course" {
+ name = "DevOps-Core-Course"
+ description = "DevOps Engineering: Core Practices - Lab assignments and projects"
+ visibility = "public"
+
+ has_issues = true
+ has_wiki = false
+ has_projects = false
+ has_downloads = true
+
+ allow_merge_commit = true
+ allow_squash_merge = true
+ allow_rebase_merge = true
+ allow_auto_merge = false
+
+ delete_branch_on_merge = true
+
+ topics = [
+ "devops",
+ "terraform",
+ "pulumi",
+ "docker",
+ "kubernetes",
+ "ansible",
+ "ci-cd",
+ "infrastructure-as-code"
+ ]
+}
+
+# Branch protection for master branch (optional).
+# required_approving_review_count = 0 means PRs need a review request but
+# zero approvals to merge.
+resource "github_branch_protection" "master_protection" {
+ repository_id = github_repository.devops_course.node_id
+ pattern = "master"
+
+ required_pull_request_reviews {
+ dismiss_stale_reviews = true
+ require_code_owner_reviews = false
+ required_approving_review_count = 0
+ }
+
+ enforce_admins = false
+}
\ No newline at end of file
diff --git a/terraform/main.tf b/terraform/main.tf
new file mode 100644
index 0000000000..1b24dfba9f
--- /dev/null
+++ b/terraform/main.tf
@@ -0,0 +1,118 @@
+terraform {
+ required_providers {
+ yandex = {
+ source = "yandex-cloud/yandex"
+ version = "~> 0.187"
+ }
+ github = {
+ source = "integrations/github"
+ version = "~> 5.0"
+ }
+ }
+ required_version = ">= 1.9.0"
+}
+
+provider "yandex" {
+ # pathexpand() resolves a leading "~" in the key path; Terraform does not
+ # expand "~" on its own.
+ service_account_key_file = pathexpand(var.service_account_key_file)
+ cloud_id = var.cloud_id
+ folder_id = var.folder_id
+ zone = var.zone
+}
+
+# Get latest Ubuntu 24.04 image
+data "yandex_compute_image" "ubuntu" {
+ family = "ubuntu-2404-lts"
+}
+
+# Create VPC network
+resource "yandex_vpc_network" "lab04_network" {
+ name = "lab04-network"
+ description = "Network for Lab 04 VM"
+}
+
+# Create subnet
+resource "yandex_vpc_subnet" "lab04_subnet" {
+ name = "lab04-subnet"
+ description = "Subnet for Lab 04 VM"
+ v4_cidr_blocks = ["10.128.0.0/24"]
+ zone = var.zone
+ network_id = yandex_vpc_network.lab04_network.id
+}
+
+# Create security group with required rules
+resource "yandex_vpc_security_group" "lab04_sg" {
+ name = "lab04-sg"
+ description = "Lab04 security group"
+ network_id = yandex_vpc_network.lab04_network.id
+
+ ingress {
+ protocol = "TCP"
+ description = "SSH from my IP"
+ v4_cidr_blocks = [var.ssh_allowed_cidr]
+ port = 22
+ }
+
+ ingress {
+ protocol = "TCP"
+ description = "HTTP"
+ v4_cidr_blocks = ["0.0.0.0/0"]
+ port = 80
+ }
+
+ ingress {
+ protocol = "TCP"
+ description = "App 5000"
+ v4_cidr_blocks = ["0.0.0.0/0"]
+ port = 5000
+ }
+
+ egress {
+ protocol = "ANY"
+ description = "Allow all egress"
+ v4_cidr_blocks = ["0.0.0.0/0"]
+ from_port = 0
+ to_port = 65535
+ }
+}
+
+# Create VM instance
+resource "yandex_compute_instance" "lab04_vm" {
+ name = var.vm_name
+ hostname = var.vm_name
+ platform_id = "standard-v2"
+ zone = var.zone
+
+ resources {
+ cores = 2
+ memory = 1
+ core_fraction = 20 # Free tier: 20% CPU
+ }
+
+ boot_disk {
+ initialize_params {
+ image_id = data.yandex_compute_image.ubuntu.id
+ size = 10 # 10 GB HDD
+ type = "network-hdd"
+ }
+ }
+
+ network_interface {
+ subnet_id = yandex_vpc_subnet.lab04_subnet.id
+ nat = true # Assign public IP
+ security_group_ids = [yandex_vpc_security_group.lab04_sg.id]
+ }
+
+ metadata = {
+ # Fix: file() does not expand "~", so the default ssh_public_key_path
+ # ("~/.ssh/id_rsa.pub") would fail to resolve. pathexpand() handles it,
+ # matching the pathexpand() already used for service_account_key_file.
+ ssh-keys = "${var.ssh_user}:${file(pathexpand(var.ssh_public_key_path))}"
+ }
+
+ labels = {
+ environment = "lab04"
+ managed_by = "terraform"
+ purpose = "devops-course"
+ }
+
+ scheduling_policy {
+ preemptible = false # VM is not reclaimed by the platform
+ }
+}
\ No newline at end of file
diff --git a/terraform/outputs.tf b/terraform/outputs.tf
new file mode 100644
index 0000000000..0699bacaae
--- /dev/null
+++ b/terraform/outputs.tf
@@ -0,0 +1,53 @@
+# VM outputs
+output "vm_id" {
+ description = "ID of the created VM"
+ value = yandex_compute_instance.lab04_vm.id
+}
+
+output "vm_name" {
+ description = "Name of the created VM"
+ value = yandex_compute_instance.lab04_vm.name
+}
+
+output "vm_public_ip" {
+ description = "Public IP address of the VM"
+ value = yandex_compute_instance.lab04_vm.network_interface[0].nat_ip_address
+}
+
+output "vm_private_ip" {
+ description = "Private IP address of the VM"
+ value = yandex_compute_instance.lab04_vm.network_interface[0].ip_address
+}
+
+# Network outputs
+output "network_id" {
+ description = "ID of the VPC network"
+ value = yandex_vpc_network.lab04_network.id
+}
+
+output "subnet_id" {
+ description = "ID of the subnet"
+ value = yandex_vpc_subnet.lab04_subnet.id
+}
+
+output "security_group_id" {
+ description = "ID of the security group"
+ value = yandex_vpc_security_group.lab04_sg.id
+}
+
+# SSH connection command
+output "ssh_command" {
+ description = "SSH command to connect to the VM"
+ value = "ssh ${var.ssh_user}@${yandex_compute_instance.lab04_vm.network_interface[0].nat_ip_address}"
+}
+
+# Connection info
+output "connection_info" {
+ description = "Complete connection information"
+ value = {
+ public_ip = yandex_compute_instance.lab04_vm.network_interface[0].nat_ip_address
+ private_ip = yandex_compute_instance.lab04_vm.network_interface[0].ip_address
+ ssh_user = var.ssh_user
+ ssh_command = "ssh ${var.ssh_user}@${yandex_compute_instance.lab04_vm.network_interface[0].nat_ip_address}"
+ }
+}
\ No newline at end of file
diff --git a/terraform/variables.tf b/terraform/variables.tf
new file mode 100644
index 0000000000..5efc57b2e9
--- /dev/null
+++ b/terraform/variables.tf
@@ -0,0 +1,61 @@
+# Yandex Cloud configuration
+variable "cloud_id" {
+ description = "Yandex Cloud ID"
+ type = string
+}
+
+variable "folder_id" {
+ description = "Yandex Cloud folder ID"
+ type = string
+}
+
+variable "service_account_key_file" {
+ description = "Path to service account key file (JSON)"
+ type = string
+ default = "~/.config/yandex-cloud/key.json"
+}
+
+variable "zone" {
+ description = "Yandex Cloud availability zone"
+ type = string
+ default = "ru-central1-a"
+}
+
+# VM configuration
+variable "vm_name" {
+ description = "Name of the virtual machine"
+ type = string
+ default = "lab04-vm"
+}
+
+# SSH configuration
+variable "ssh_user" {
+ description = "SSH username for VM access"
+ type = string
+ default = "ubuntu"
+}
+
+# NOTE(review): Terraform's file() does not expand "~" — wrap usages of this
+# variable with pathexpand() (as done for service_account_key_file in main.tf).
+variable "ssh_public_key_path" {
+ description = "Path to SSH public key file"
+ type = string
+ default = "~/.ssh/id_rsa.pub"
+}
+
+variable "ssh_allowed_cidr" {
+ description = "CIDR allowed to SSH, e.g. 203.0.113.10/32"
+ type = string
+}
+
+# GitHub configuration (for bonus task)
+variable "github_token" {
+ description = "GitHub personal access token"
+ type = string
+ sensitive = true
+ default = ""
+}
+
+variable "github_owner" {
+ description = "GitHub repository owner (username or organization)"
+ type = string
+ default = ""
+}
\ No newline at end of file