diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 185a00e0c5490c345f5d152380cb345588bad600..c072396f072c3df05c3e8b22f2f992dc9a11e688 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -23,8 +23,8 @@ jobs:
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
 
-  test-kubernetes:
-    name: e2e k3s ${{ matrix.k3s }}
+  kubernetes:
+    name: kubernetes ${{ matrix.k3s }}
     runs-on: ubuntu-latest
 
     permissions:
@@ -33,21 +33,22 @@ jobs:
     concurrency: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.k3s }}
 
     strategy:
-      # The e2e tests are flaky and often one of the jobs fails. The default setting
-      # causes all other currently running jobs to abort and all need to be restarted.
-      fail-fast: false
+      fail-fast: false # Continue the test matrix if a flaky run occurs.
       matrix:
         include:
-          # All k3s after January 2024 break our e2e tests, we hardcode
-          # the versions for now until we can fix the source of this.
-          - k3s: v1.26.12+k3s1
-            k8s-test: v1.26.12
-          - k3s: v1.27.9+k3s1
-            k8s-test: v1.27.9
-          - k3s: v1.28.5+k3s1
-            k8s-test: v1.28.5
-          - k3s: v1.29.0+k3s1
-            k8s-test: v1.29.0
+          - k3s: v1.26
+            k8s-test: v1.26.15
+          - k3s: v1.27
+            k8s-test: v1.27.15
+          - k3s: v1.28
+            k8s-test: v1.28.11
+          - k3s: v1.29
+            k8s-test: v1.29.6
+
+    env:
+      K3S_CHANNEL: ${{ matrix.k3s }}
+      K8S_TEST_VERSION: ${{ matrix.k8s-test }}
+      ENV: gha-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.k3s }}
 
     steps:
       - uses: actions/checkout@v4
@@ -56,9 +57,12 @@ jobs:
         with:
           go-version-file: go.mod
 
-      - uses: hetznercloud/tps-action@main
+      - uses: opentofu/setup-opentofu@v1
+        with:
+          tofu_version: v1.7.2 # renovate: datasource=github-releases depName=opentofu/opentofu
+          tofu_wrapper: false
 
-      - uses: hetznercloud/setup-hcloud@v1
+      - uses: docker/setup-buildx-action@v3
 
       - uses: yokawasa/action-setup-kube-tools@v0.11.1
         with:
@@ -66,26 +70,35 @@ jobs:
             helm
             kubectl
             skaffold
-          helm: v3.11.2
-          kubectl: v1.29.0
-          skaffold: v2.3.0
+          helm: v3.15.2 # renovate: datasource=github-releases depName=helm/helm
+          kubectl: v1.29.6 # renovate: datasource=github-releases depName=kubernetes/kubernetes
+          skaffold: v2.12.0 # renovate: datasource=github-releases depName=GoogleContainerTools/skaffold
 
-      - name: Run tests
-        env:
-          K3S_VERSION: ${{ matrix.k3s }}
-          K8S_TEST_VERSION: ${{ matrix.k8s-test }}
-          SCOPE: gha-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.k3s }}
-        run: |
-          curl -sLS https://get.k3sup.dev | sh
+      - name: Setup k3sup
+        run: curl -sLS https://get.k3sup.dev | sh
+
+      - name: Setup k8s test binaries
+        run: make -C test/e2e/kubernetes bin
 
-          trap "hack/dev-down.sh" EXIT
-          source <(hack/dev-up.sh)
+      - uses: hetznercloud/tps-action@main
 
-          skaffold build --tag="e2e-${GITHUB_RUN_ID}-${GITHUB_RUN_NUMBER}"
-          tag=$(skaffold build --tag="e2e-${GITHUB_RUN_ID}-${GITHUB_RUN_NUMBER}" --quiet --output="{{ (index .Builds 0).Tag }}")
-          skaffold deploy --images=docker.io/hetznercloud/hcloud-csi-driver=$tag
+      - name: Setup environment
+        run: make -C dev up
 
-          test/e2e/kubernetes/run-e2e-tests.sh
+      - name: Run skaffold
+        run: |
+          source dev/files/env.sh
+          skaffold run
+
+      - name: Run parallel tests
+        run: |
+          source dev/files/env.sh
+          make -C test/e2e/kubernetes parallel
+
+      - name: Run serial tests
+        run: |
+          source dev/files/env.sh
+          make -C test/e2e/kubernetes serial
 
   deploy-manifests:
     runs-on: ubuntu-latest
@@ -95,7 +108,7 @@ jobs:
         with:
           setup-tools: |
             helm
-          helm: v3.11.2
+          helm: v3.11.2 # renovate: datasource=github-releases depName=helm/helm
 
       - uses: actions/checkout@v4
 
@@ -117,7 +130,7 @@ jobs:
         with:
           setup-tools: |
             helm
-          helm: v3.11.2
+          helm: v3.11.2 # renovate: datasource=github-releases depName=helm/helm
 
       - uses: actions/checkout@v4
 
diff --git a/.gitignore b/.gitignore
index 6fa7c4f83f9572b68653822a2e30272d9644a10e..a904188b74bf53e4116907244131f58fa59cf5ff 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,6 @@
 # rootfs for docker plugin
 deploy/docker-swarm/pkg/plugin
 
-# Tmp files for dev env
-hack/.*
-
 # Build output
 dist/
 hcloud-csi-*.tgz
diff --git a/README.md b/README.md
index 1c3d41bc36a5b5caed9030c196a6d9da68f56a98..80b4759102a400a31d89b19b5544d49bd0fa2aa8 100644
--- a/README.md
+++ b/README.md
@@ -17,117 +17,77 @@ relevant to that Container Orchestrator behind the link:
 - [Docker Swarm](./docs/docker-swarm/README.md)️ _⚠️ Not officially supported_
 - [HashiCorp Nomad](./docs/nomad/README.md)️ _⚠️ Not officially supported_
 
-## Tests
+## Development
 
-### Integration Tests
+### Setup a development environment
 
-**Requirements: Docker**
+To set up a development environment, make sure you have installed the following tools:
 
-The core operations like publishing and resizing can be tested locally with Docker.
+- [tofu](https://opentofu.org/)
+- [k3sup](https://github.com/alexellis/k3sup)
+- [docker](https://www.docker.com/)
+- [skaffold](https://skaffold.dev/)
 
-```bash
-go test $(go list ./... | grep integration) -v
-```
+1. Configure a `HCLOUD_TOKEN` in your shell session.
 
-### E2E Tests
+> [!WARNING]
+> The development environment runs on Hetzner Cloud servers, which will incur costs.
 
-> ⚠️ Kubernetes E2E Tests were recently refactored and the docs are now outdated.
-> See the [GitHub Actions workflow](.github/workflows/test_e2e.yml) for an
-> up-to-date script to run the e2e tests.
+2. Deploy the development cluster:
 
-The Hetzner Cloud CSI Driver was tested against the official k8s e2e
-tests for a specific version. You can run the tests with the following
-commands. Keep in mind, that these tests run on real cloud servers and
-will create volumes that will be billed.
+```sh
+make -C dev up
+```
 
-**Test Server Setup**:
+3. Load the generated configuration to access the development cluster:
 
-1x CPX21 (Ubuntu 18.04)
+```sh
+source dev/files/env.sh
+```
 
-**Requirements: Docker and Go 1.17**
+4. Check that the development cluster is healthy:
 
-1. Configure your environment correctly
-   ```bash
-   export HCLOUD_TOKEN=<specify a project token>
-   export K8S_VERSION=1.21.0 # The specific (latest) version is needed here
-   export USE_SSH_KEYS=key1,key2 # Name or IDs of your SSH Keys within the Hetzner Cloud, the servers will be accessible with that keys
-   ```
-2. Run the tests
-   ```bash
-   go test $(go list ./... | grep e2e) -v -timeout 60m
-   ```
+```sh
+kubectl get nodes -o wide
+```
 
-The tests will now run, this will take a while (~30 min).
+5. Start developing the CSI driver in the development cluster:
 
-**If the tests fail, make sure to clean up the project with the Hetzner Cloud Console or the hcloud cli.**
+```sh
+skaffold dev
+```
 
-### Local test setup  
+On code change, skaffold will rebuild the image, redeploy it and print all logs from csi components.
 
-> ⚠️ Local Kubernetes Dev Setup was recently refactored and the docs are now
-> outdated. Check out the scripts [dev-up.sh](hack/dev-up.sh) &
-> [dev-down.sh](hack/dev-down.sh) for an automatic dev setup.
+⚠️ Do not forget to clean up the development cluster once you are finished:
 
-This repository provides [skaffold](https://skaffold.dev/) to easily deploy / debug this driver on demand
+```sh
+make -C dev down
+```
 
-#### Requirements
-1. Install [hcloud-cli](https://github.com/hetznercloud/cli)
-2. Install [k3sup](https://github.com/alexellis/k3sup)
-3. Install [cilium](https://github.com/cilium/cilium-cli)
-4. Install [docker](https://www.docker.com/)
+### Run the docker e2e tests
 
-You will also need to set a `HCLOUD_TOKEN` in your shell session
+To run the integration tests, make sure you have installed the following tools:
 
-#### Manual Installation guide
+- [docker](https://www.docker.com/)
 
-1. Create an SSH key
+1. Run the following command to run the integration tests:
 
-Assuming you already have created an ssh key via `ssh-keygen`
-```
-hcloud ssh-key create --name ssh-key-csi-test --public-key-from-file ~/.ssh/id_rsa.pub 
+```sh
+go test -v ./test/integration
 ```
 
-2. Create a server
-```
-hcloud server create --name csi-test-server --image ubuntu-20.04 --ssh-key ssh-key-csi-test --type cx22 
-```
+### Run the kubernetes e2e tests
 
-3. Setup k3s on this server
-```
-k3sup install --ip $(hcloud server ip csi-test-server) --local-path=/tmp/kubeconfig --cluster --k3s-channel=v1.23 --k3s-extra-args='--no-flannel --no-deploy=servicelb --no-deploy=traefik --disable-cloud-controller --disable-network-policy --kubelet-arg=cloud-provider=external'
-```
-- The kubeconfig will be created under `/tmp/kubeconfig`
-- Kubernetes version can be configured via `--k3s-channel`
+The Hetzner Cloud CSI driver is tested against the official kubernetes e2e tests.
 
-4. Switch your kubeconfig to the test cluster
-```
-export KUBECONFIG=/tmp/kubeconfig
-```
-
-5. Install cilium + test your cluster
-```
-cilium install
-```
+Before running the kubernetes e2e tests, make sure you followed the [Setup a development environment](#setup-a-development-environment) steps.
 
-6. Add your secret to the cluster
-```
-kubectl -n kube-system create secret generic hcloud --from-literal="token=$HCLOUD_TOKEN"
-```
+1. Run the kubernetes e2e tests using the following command:
 
-7. Install hcloud-cloud-controller-manager + test your cluster
+```sh
+make -C test/e2e/kubernetes test
 ```
-kubectl apply -f  https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm.yaml
-kubectl config set-context default
-kubectl get node -o wide
-```
-
-8. Deploy your CSI driver
-```
-SKAFFOLD_DEFAULT_REPO=naokiii skaffold dev
-```
-- `docker login` required
-- Skaffold is using your own dockerhub repo to push the CSI image.
-
-On code change, skaffold will repack the image & deploy it to your test cluster again. Also, it is printing all logs from csi components.
 
 ## License
 
diff --git a/dev/.gitignore b/dev/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..fbef7b5d4dccd0e16f8b203b21f3fd032ae2ba2e
--- /dev/null
+++ b/dev/.gitignore
@@ -0,0 +1,9 @@
+.terraform*
+terraform.tfstate
+terraform.tfstate.backup
+*.auto.tfvars
+
+files/*
+!files/.gitkeep
+
+.env
diff --git a/hack/Dockerfile b/dev/Dockerfile
similarity index 100%
rename from hack/Dockerfile
rename to dev/Dockerfile
diff --git a/dev/Makefile b/dev/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..63f6ea670bcc59c9002c944d5e59c8d5f493213b
--- /dev/null
+++ b/dev/Makefile
@@ -0,0 +1,27 @@
+SHELL = bash
+.ONESHELL:
+
+ENV ?= dev
+K3S_CHANNEL ?= stable
+
+env.auto.tfvars:
+	@echo 'name = "$(ENV)"' > "$@"
+	@echo 'hcloud_token = "$(HCLOUD_TOKEN)"' >> "$@"
+	@echo 'k3s_channel = "$(K3S_CHANNEL)"' >> "$@"
+
+.terraform:
+	tofu init
+
+up: .terraform env.auto.tfvars
+	tofu apply -auto-approve
+	$(MAKE) port-forward
+
+down: .terraform env.auto.tfvars
+	tofu destroy -auto-approve
+
+port-forward:
+	source files/env.sh
+	bash files/registry-port-forward.sh
+
+clean:
+	rm -Rf files/* .terraform* terraform.tfstate* env.auto.tfvars
diff --git a/dev/files/.gitkeep b/dev/files/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dev/hcloud-k8s-env/main-infra.tf b/dev/hcloud-k8s-env/main-infra.tf
new file mode 100644
index 0000000000000000000000000000000000000000..391019803f3fd80aedee8b853db1119986c19cc0
--- /dev/null
+++ b/dev/hcloud-k8s-env/main-infra.tf
@@ -0,0 +1,102 @@
+# Setup the infrastructure
+
+provider "hcloud" {
+  token = var.hcloud_token
+}
+
+locals {
+  labels = {
+    env = var.name
+  }
+}
+
+# SSH Key
+
+resource "tls_private_key" "ssh" {
+  algorithm = "ED25519"
+}
+
+resource "local_sensitive_file" "ssh" {
+  content  = tls_private_key.ssh.private_key_openssh
+  filename = abspath("${path.root}/files/id_ed25519")
+}
+
+resource "hcloud_ssh_key" "default" {
+  name       = var.name
+  public_key = tls_private_key.ssh.public_key_openssh
+  labels     = local.labels
+}
+
+# Network
+
+resource "hcloud_network" "cluster" {
+  name     = var.name
+  ip_range = "10.0.0.0/8"
+  labels   = local.labels
+}
+
+resource "hcloud_network_subnet" "cluster" {
+  network_id   = hcloud_network.cluster.id
+  network_zone = "eu-central"
+  type         = "cloud"
+  ip_range     = "10.0.0.0/24"
+}
+
+# Control Plane Node
+
+resource "hcloud_server" "control" {
+  name        = "${var.name}-control"
+  server_type = var.hcloud_server_type
+  location    = var.hcloud_location
+  image       = var.hcloud_image
+  ssh_keys    = [hcloud_ssh_key.default.id]
+  labels      = local.labels
+
+  connection {
+    host        = self.ipv4_address
+    private_key = tls_private_key.ssh.private_key_openssh
+  }
+
+  provisioner "remote-exec" {
+    inline = ["cloud-init status --wait || test $? -eq 2"]
+  }
+}
+
+resource "hcloud_server_network" "control" {
+  server_id = hcloud_server.control.id
+  subnet_id = hcloud_network_subnet.cluster.id
+}
+
+# Worker / Agent Nodes
+
+variable "worker_count" {
+  type    = number
+  default = 3
+}
+
+resource "hcloud_server" "worker" {
+  count = var.worker_count
+
+  name        = "${var.name}-worker-${count.index}"
+  server_type = var.hcloud_server_type
+  location    = var.hcloud_location
+  image       = var.hcloud_image
+  ssh_keys    = [hcloud_ssh_key.default.id]
+  labels      = local.labels
+
+  connection {
+    host        = self.ipv4_address
+    private_key = tls_private_key.ssh.private_key_openssh
+  }
+
+  provisioner "remote-exec" {
+    inline = ["cloud-init status --wait || test $? -eq 2"]
+  }
+}
+
+resource "hcloud_server_network" "worker" {
+  count = var.worker_count
+
+  server_id = hcloud_server.worker[count.index].id
+  subnet_id = hcloud_network_subnet.cluster.id
+}
diff --git a/dev/hcloud-k8s-env/main-setup.tf b/dev/hcloud-k8s-env/main-setup.tf
new file mode 100644
index 0000000000000000000000000000000000000000..952f8aee3e654bdeb66cc56d0d8b5e79371c47fa
--- /dev/null
+++ b/dev/hcloud-k8s-env/main-setup.tf
@@ -0,0 +1,216 @@
+# Setup the k3s cluster
+
+locals {
+  # The CIDR range for the Pods, must be included in the range of the
+  # network (10.0.0.0/8) but must not overlap with the Subnet (10.0.0.0/24)
+  cluster_cidr = "10.244.0.0/16"
+
+  registry_service_ip = "10.43.0.2"
+  registry_port       = 30666
+
+  kubeconfig_path = abspath("${path.root}/files/kubeconfig.yaml")
+  env_path        = abspath("${path.root}/files/env.sh")
+}
+
+resource "null_resource" "k3sup_control" {
+  triggers = {
+    id = hcloud_server.control.id
+    ip = hcloud_server_network.control.ip
+  }
+
+  connection {
+    host        = hcloud_server.control.ipv4_address
+    private_key = tls_private_key.ssh.private_key_openssh
+  }
+
+  provisioner "remote-exec" {
+    inline = ["mkdir -p /etc/rancher/k3s"]
+  }
+  provisioner "file" {
+    content = yamlencode({
+      "mirrors" : {
+        "localhost:${local.registry_port}" : {
+          "endpoint" : ["http://${local.registry_service_ip}:5000"]
+        }
+      }
+    })
+    destination = "/etc/rancher/k3s/registries.yaml"
+  }
+
+  provisioner "local-exec" {
+    command = <<-EOT
+      k3sup install --print-config=false \
+        --ssh-key='${local_sensitive_file.ssh.filename}' \
+        --ip='${hcloud_server.control.ipv4_address}' \
+        --k3s-channel='${var.k3s_channel}' \
+        --k3s-extra-args="\
+          --kubelet-arg=cloud-provider=external \
+          --cluster-cidr='${local.cluster_cidr}' \
+          --disable-cloud-controller \
+          --disable-network-policy \
+          --disable=local-storage \
+          --disable=servicelb \
+          --disable=traefik \
+          --flannel-backend=none \
+          --node-external-ip='${hcloud_server.control.ipv4_address}' \
+          --node-ip='${hcloud_server_network.control.ip}'" \
+        --local-path='${local.kubeconfig_path}'
+    EOT
+  }
+}
+
+resource "null_resource" "k3sup_worker" {
+  count = var.worker_count
+
+  triggers = {
+    id = hcloud_server.worker[count.index].id
+    ip = hcloud_server_network.worker[count.index].ip
+
+    # Wait for the control-plane to be initialized, and re-join the new cluster
+    # if the control-plane server changed.
+    control_id = null_resource.k3sup_control.id
+  }
+
+  connection {
+    host        = hcloud_server.worker[count.index].ipv4_address
+    private_key = tls_private_key.ssh.private_key_openssh
+  }
+
+  provisioner "remote-exec" {
+    inline = ["mkdir -p /etc/rancher/k3s"]
+  }
+  provisioner "file" {
+    content = yamlencode({
+      "mirrors" : {
+        "localhost:${local.registry_port}" : {
+          "endpoint" : ["http://${local.registry_service_ip}:5000"]
+        }
+      }
+    })
+    destination = "/etc/rancher/k3s/registries.yaml"
+  }
+
+  provisioner "local-exec" {
+    command = <<-EOT
+      k3sup join \
+        --ssh-key='${local_sensitive_file.ssh.filename}' \
+        --ip='${hcloud_server.worker[count.index].ipv4_address}' \
+        --server-ip='${hcloud_server.control.ipv4_address}' \
+        --k3s-channel='${var.k3s_channel}' \
+        --k3s-extra-args="\
+          --kubelet-arg='cloud-provider=external' \
+          --node-external-ip='${hcloud_server.worker[count.index].ipv4_address}' \
+          --node-ip='${hcloud_server_network.worker[count.index].ip}'"
+      EOT
+  }
+}
+
+# Configure kubernetes
+
+data "local_sensitive_file" "kubeconfig" {
+  depends_on = [null_resource.k3sup_control]
+  filename   = local.kubeconfig_path
+}
+
+provider "kubernetes" {
+  config_path = data.local_sensitive_file.kubeconfig.filename
+}
+
+resource "kubernetes_secret_v1" "hcloud_token" {
+  metadata {
+    name      = "hcloud"
+    namespace = "kube-system"
+  }
+
+  data = {
+    token   = var.hcloud_token
+    network = hcloud_network.cluster.id
+  }
+}
+
+provider "helm" {
+  kubernetes {
+    config_path = data.local_sensitive_file.kubeconfig.filename
+  }
+}
+
+resource "helm_release" "cilium" {
+  name       = "cilium"
+  chart      = "cilium"
+  repository = "https://helm.cilium.io"
+  namespace  = "kube-system"
+  version    = "1.13.1" # renovate: datasource=github-releases depName=cilium/cilium extractVersion=v(?<version>.+)
+  wait       = true
+
+  set {
+    name  = "operator.replicas"
+    value = "1"
+  }
+  set {
+    name  = "ipam.mode"
+    value = "kubernetes"
+  }
+  set {
+    name  = "tunnel"
+    value = "disabled"
+  }
+  set {
+    name  = "ipv4NativeRoutingCIDR"
+    value = local.cluster_cidr
+  }
+}
+
+resource "helm_release" "hcloud_cloud_controller_manager" {
+  name       = "hcloud-cloud-controller-manager"
+  chart      = "hcloud-cloud-controller-manager"
+  repository = "https://charts.hetzner.cloud"
+  namespace  = "kube-system"
+  version    = "1.19.0" # renovate: datasource=github-releases depName=hetznercloud/hcloud-cloud-controller-manager extractVersion=v(?<version>.+)
+  wait       = true
+
+  set {
+    name  = "networking.enabled"
+    value = "true"
+  }
+}
+
+resource "helm_release" "docker_registry" {
+  name       = "docker-registry"
+  chart      = "docker-registry"
+  repository = "https://helm.twun.io"
+  namespace  = "kube-system"
+  version    = "2.2.2" # renovate: datasource=github-releases depName=distribution/distribution extractVersion=v(?<version>.+)
+  wait       = true
+
+  set {
+    name  = "service.clusterIP"
+    value = local.registry_service_ip
+  }
+  set {
+    name  = "tolerations[0].key"
+    value = "node.cloudprovider.kubernetes.io/uninitialized"
+  }
+  set {
+    name  = "tolerations[0].operator"
+    value = "Exists"
+  }
+}
+
+# Export files
+
+resource "local_file" "registry_port_forward" {
+  source          = "${path.module}/registry-port-forward.sh"
+  filename        = "${path.root}/files/registry-port-forward.sh"
+  file_permission = "0755"
+}
+
+resource "local_file" "env" {
+  content         = <<-EOT
+    #!/usr/bin/env bash
+
+    export KUBECONFIG=${data.local_sensitive_file.kubeconfig.filename}
+    export SKAFFOLD_DEFAULT_REPO=localhost:${local.registry_port}
+  EOT
+  filename        = local.env_path
+  file_permission = "0644"
+}
diff --git a/dev/hcloud-k8s-env/providers.tf b/dev/hcloud-k8s-env/providers.tf
new file mode 100644
index 0000000000000000000000000000000000000000..1dcc2da62ccbcd1e00ab0ec41fa541b12d95756e
--- /dev/null
+++ b/dev/hcloud-k8s-env/providers.tf
@@ -0,0 +1,29 @@
+terraform {
+  required_providers {
+    local = {
+      source  = "hashicorp/local"
+      version = "2.5.1"
+    }
+    null = {
+      source  = "hashicorp/null"
+      version = "3.2.2"
+    }
+    tls = {
+      source  = "hashicorp/tls"
+      version = "4.0.5"
+    }
+    helm = {
+      source  = "hashicorp/helm"
+      version = "2.14.0"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "2.31.0"
+    }
+
+    hcloud = {
+      source  = "hetznercloud/hcloud"
+      version = "1.45.0"
+    }
+  }
+}
diff --git a/dev/hcloud-k8s-env/registry-port-forward.sh b/dev/hcloud-k8s-env/registry-port-forward.sh
new file mode 100755
index 0000000000000000000000000000000000000000..863adeb4c9b1142dbd3d5dab82238b003bd3ecae
--- /dev/null
+++ b/dev/hcloud-k8s-env/registry-port-forward.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+set -ue -o pipefail
+
+run() {
+  unit="k8s-registry-port-forward.service"
+  description="Port Forward for Container Registry of k8s dev environment"
+
+  systemctl --user stop "$unit" 2> /dev/null || true
+
+  systemd-run --user \
+    --unit="$unit" \
+    --description="$description" \
+    --same-dir \
+    --setenv="KUBECONFIG=$KUBECONFIG" \
+    --collect \
+    kubectl port-forward -n kube-system svc/docker-registry 30666:5000
+}
+
+run
diff --git a/dev/hcloud-k8s-env/variables.tf b/dev/hcloud-k8s-env/variables.tf
new file mode 100644
index 0000000000000000000000000000000000000000..d558345b38d646746f6a0790934440ecf36f2bda
--- /dev/null
+++ b/dev/hcloud-k8s-env/variables.tf
@@ -0,0 +1,35 @@
+# Environment
+variable "name" {
+  description = "Name of the environment"
+  type        = string
+  default     = "dev"
+}
+
+# Hetzner Cloud
+variable "hcloud_token" {
+  description = "Hetzner Cloud API token"
+  type        = string
+  sensitive   = true
+}
+variable "hcloud_server_type" {
+  description = "Hetzner Cloud Server Type used for the environment"
+  type        = string
+  default     = "cpx21"
+}
+variable "hcloud_location" {
+  description = "Hetzner Cloud Location used for the environment"
+  type        = string
+  default     = "fsn1"
+}
+variable "hcloud_image" {
+  description = "Hetzner Cloud Image used for the environment"
+  type        = string
+  default     = "ubuntu-24.04"
+}
+
+# K3S
+variable "k3s_channel" {
+  description = "k3s channel used for the environment"
+  type        = string
+  default     = "stable"
+}
diff --git a/hack/kustomization.yaml b/dev/kustomization.yaml
similarity index 100%
rename from hack/kustomization.yaml
rename to dev/kustomization.yaml
diff --git a/dev/main.tf b/dev/main.tf
new file mode 100644
index 0000000000000000000000000000000000000000..5147587061516482b06edc75ed14cc504e2ffd23
--- /dev/null
+++ b/dev/main.tf
@@ -0,0 +1,8 @@
+module "dev" {
+  source = "./hcloud-k8s-env"
+
+  name         = "csi-driver-${replace(var.name, "/[^a-zA-Z0-9-_]/", "-")}"
+  hcloud_token = var.hcloud_token
+
+  k3s_channel = var.k3s_channel
+}
diff --git a/dev/variables.tf b/dev/variables.tf
new file mode 100644
index 0000000000000000000000000000000000000000..46b34057d0d426ad9245a9f93e15313686960796
--- /dev/null
+++ b/dev/variables.tf
@@ -0,0 +1,10 @@
+variable "name" {
+  type = string
+}
+variable "hcloud_token" {
+  type      = string
+  sensitive = true
+}
+variable "k3s_channel" {
+  type = string
+}
diff --git a/hack/dev-down.sh b/hack/dev-down.sh
deleted file mode 100755
index bf3097fb9f6baab8154eee2b42de4adf124bd5eb..0000000000000000000000000000000000000000
--- a/hack/dev-down.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-set -ue -o pipefail
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
-
-scope="${SCOPE:-dev}"
-scope=${scope//[^a-zA-Z0-9_]/-}
-scope_name=csi-${scope}
-label="managedby=hack"
-
-if [[ "${ALL:-}" == "" ]]; then
-  label="$label,scope=$scope_name"
-  rm -f $SCRIPT_DIR/.ssh-$scope $SCRIPT_DIR/.kubeconfig-$scope
-else
-  rm -f $SCRIPT_DIR/.ssh* $SCRIPT_DIR/.kubeconfig*
-fi
-
-for instance in $(hcloud server list -o noheader -o columns=id -l $label); do
-  (
-    hcloud server delete $instance
-  ) &
-done
-
-
-for key in $(hcloud ssh-key list -o noheader -o columns=name -l $label); do
-  (
-    hcloud ssh-key delete $key
-  ) &
-done
-
-
-for key in $(hcloud network list -o noheader -o columns=name -l $label); do
-  (
-    hcloud network delete $key
-  ) &
-done
-
-wait
\ No newline at end of file
diff --git a/hack/dev-up.sh b/hack/dev-up.sh
deleted file mode 100755
index 943258ed3c95a43afc34321beb9c59ba8da8de7c..0000000000000000000000000000000000000000
--- a/hack/dev-up.sh
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/env bash
-set -ueo pipefail
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
-
-if [[ -n "${DEBUG:-}" ]]; then set -x; fi
-
-# Redirect all stdout to stderr.
-{
-  if ! hcloud version >/dev/null; then echo "ERROR: 'hcloud' CLI not found, please install it and make it available on your \$PATH"; exit 1; fi
-  if ! k3sup version >/dev/null; then echo "ERROR: 'k3sup' not found, please install it and make it available on your \$PATH"; exit 1; fi
-  if ! helm version >/dev/null; then echo "ERROR: 'helm' not found, please install it and make it available on your \$PATH"; exit 1; fi
-  if [[ "${HCLOUD_TOKEN:-}" == "" ]]; then echo "ERROR: please set \$HCLOUD_TOKEN"; exit 1; fi
-
-  # We run a lot of subshells below for speed. If any encounter an error, we shut down the whole process group, pronto.
-  function error() {
-    echo "Onoes, something went wrong! :( The output above might have some clues."
-    kill 0
-  }
-
-  trap error ERR
-
-  image_name=${IMAGE_NAME:-ubuntu-20.04}
-  instance_count=${INSTANCES:-3}
-  instance_type=${INSTANCE_TYPE:-cpx11}
-  location=${LOCATION:-fsn1}
-  network_zone=${NETWORK_ZONE:-eu-central}
-  ssh_keys=${SSH_KEYS:-}
-  # All k3s after January 2024 break our e2e tests, we hardcode
-  # the versions for now until we can fix the source of this.
-  # channel=${K3S_CHANNEL:-stable}
-  k3s_version=${K3S_VERSION:-v1.28.5+k3s1}
-  network_cidr=${NETWORK_CIDR:-10.0.0.0/8}
-  subnet_cidr=${SUBNET_CIDR:-10.0.0.0/24}
-  cluster_cidr=${CLUSTER_CIDR:-10.244.0.0/16}
-  scope="${SCOPE:-dev}"
-  scope=${scope//[^a-zA-Z0-9_]/-}
-  scope_name=csi-${scope}
-  label="managedby=hack,scope=$scope_name"
-  ssh_private_key="$SCRIPT_DIR/.ssh-$scope"
-  k3s_opts=${K3S_OPTS:-"--kubelet-arg cloud-provider=external"}
-  k3s_server_opts=${K3S_SERVER_OPTS:-"--disable-cloud-controller --disable=traefik --disable=servicelb --disable=local-storage --flannel-backend=none --cluster-cidr ${cluster_cidr}"}
-
-  echo -n "$HCLOUD_TOKEN" > "$SCRIPT_DIR/.token-$scope"
-
-  export KUBECONFIG="$SCRIPT_DIR/.kubeconfig-$scope"
-
-  ssh_command="ssh -i $ssh_private_key -o StrictHostKeyChecking=off -o BatchMode=yes -o ConnectTimeout=5"
-
-  # Generate SSH keys and upload publkey to Hetzner Cloud.
-  ( trap error ERR
-    [[ ! -f $ssh_private_key ]] && ssh-keygen -t ed25519 -f $ssh_private_key -C '' -N ''
-    [[ ! -f $ssh_private_key.pub ]] && ssh-keygen -y -f $ssh_private_key > $ssh_private_key.pub
-    if ! hcloud ssh-key describe $scope_name >/dev/null 2>&1; then
-      hcloud ssh-key create --label $label --name $scope_name --public-key-from-file $ssh_private_key.pub
-    fi
-  ) &
-
-  # Create Network
-  ( trap error ERR
-     if ! hcloud network describe $scope_name >/dev/null 2>&1; then
-       hcloud network create --label $label --ip-range $network_cidr --name $scope_name
-       hcloud network add-subnet --network-zone $network_zone --type cloud --ip-range $subnet_cidr $scope_name
-     fi
-    ) &
-
-
-  for num in $(seq $instance_count); do
-    # Create server and initialize Kubernetes on it with k3sup.
-    ( trap error ERR
-
-      server_name="$scope_name-$num"
-
-      # Maybe cluster is already up and node is already there.
-      if kubectl get node $server_name >/dev/null 2>&1; then
-        exit 0
-      fi
-
-      ip=$(hcloud server ip $server_name 2>/dev/null || true)
-
-      if [[ -z "${ip:-}" ]]; then
-        # Wait for SSH key
-        until hcloud ssh-key describe $scope_name >/dev/null 2>&1; do sleep 1; done
-        until hcloud network describe $scope_name 2>&1 | grep $subnet_cidr >/dev/null; do sleep 1; done
-
-        createcmd="hcloud server create --image $image_name --label $label --location $location --name $server_name --ssh-key=$scope_name --type $instance_type --network $scope_name"
-        for key in $ssh_keys; do
-          createcmd+=" --ssh-key $key"
-        done
-        $createcmd
-        ip=$(hcloud server ip $server_name)
-      fi
-
-      # Wait for SSH.
-      until [ "$($ssh_command root@$ip echo ok 2>/dev/null)" = "ok" ]; do
-        sleep 1
-      done
-
-      $ssh_command root@$ip 'mkdir -p /etc/rancher/k3s && cat > /etc/rancher/k3s/registries.yaml' < $SCRIPT_DIR/k3s-registries.yaml
-
-      private_ip=$(hcloud server describe $server_name -o format="{{ (index .PrivateNet 0).IP }}")
-      k3s_node_ip_opts="--node-external-ip ${ip} --node-ip ${private_ip}"
-
-      if [[ "$num" == "1" ]]; then
-        # First node is control plane.
-        k3sup install --print-config=false --ip $ip --k3s-version "${k3s_version}" --k3s-extra-args "${k3s_server_opts} ${k3s_opts} ${k3s_node_ip_opts}" --local-path $KUBECONFIG --ssh-key $ssh_private_key
-      else
-        # All subsequent nodes are initialized as workers.
-
-        # Can't go any further until control plane has bootstrapped a bit though.
-        until $ssh_command root@$(hcloud server ip $scope_name-1 || true) stat /etc/rancher/node/password >/dev/null 2>&1; do
-          sleep 1
-        done
-
-        k3sup join --server-ip $(hcloud server ip $scope_name-1) --ip $ip --k3s-version "${k3s_version}" --k3s-extra-args "${k3s_opts} ${k3s_node_ip_opts}" --ssh-key $ssh_private_key
-      fi
-    ) &
-
-    # Wait for this node to show up in the cluster.
-    ( trap error ERR; set +x
-      until kubectl wait --for=condition=Ready node/$scope_name-$num >/dev/null 2>&1; do sleep 1; done
-      echo $scope_name-$num is up and in cluster
-    ) &
-  done
-
-  ( trap error ERR
-    # Control plane init tasks.
-    # This is running in parallel with the server init, above.
-
-    # Wait for control plane to look alive.
-    until kubectl get nodes >/dev/null 2>&1; do sleep 1; done;
-
-    # Deploy private registry.
-    ( trap error ERR
-      if ! helm status -n kube-system registry >/dev/null 2>&1; then
-        helm install registry docker-registry \
-          --repo=https://helm.twun.io \
-          -n kube-system \
-          --version 2.2.2 \
-          --set service.clusterIP=10.43.0.2 \
-          --set 'tolerations[0].key=node.cloudprovider.kubernetes.io/uninitialized' \
-          --set 'tolerations[0].operator=Exists'
-      fi
-      ) &
-
-    # Install Cilium.
-    ( trap error ERR
-      if ! helm status -n kube-system cilium >/dev/null 2>&1; then
-        helm install cilium cilium --repo https://helm.cilium.io/ -n kube-system --version 1.13.1 \
-          --set tunnel=disabled \
-          --set ipv4NativeRoutingCIDR=$cluster_cidr \
-          --set ipam.mode=kubernetes
-      fi) &
-
-    # Create HCLOUD_TOKEN Secret for hcloud-cloud-controller-manager.
-    ( trap error ERR
-      if ! kubectl -n kube-system get secret hcloud >/dev/null 2>&1; then
-        kubectl -n kube-system create secret generic hcloud --from-literal="token=$HCLOUD_TOKEN" --from-literal="network=$scope_name"
-      fi) &
-    wait
-
-    # Install hcloud-cloud-controller-manager
-    ( trap error ERR
-      if ! helm status -n kube-system hccm >/dev/null 2>&1; then
-        helm install hccm hcloud-cloud-controller-manager --repo https://charts.hetzner.cloud/ -n kube-system --version 1.14.2 --set networking.enabled=true
-      fi) &
-    wait
-  ) &
-  wait
-  echo "Success - cluster fully initialized and ready, why not see for yourself?"
-  echo '$ kubectl get nodes'
-  kubectl get nodes
-} >&2
-
-echo "export KUBECONFIG=$KUBECONFIG"
-$SCRIPT_DIR/registry-port-forward.sh
-echo "export SKAFFOLD_DEFAULT_REPO=localhost:30666"
diff --git a/hack/k3s-registries.yaml b/hack/k3s-registries.yaml
deleted file mode 100644
index 8c808b1211b316865b5654ec133faeea95021aca..0000000000000000000000000000000000000000
--- a/hack/k3s-registries.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-mirrors:
-  localhost:30666:
-    endpoint: ["http://10.43.0.2:5000"]
diff --git a/hack/registry-port-forward.sh b/hack/registry-port-forward.sh
deleted file mode 100755
index 082079d242425a38828343a85702f2a5c4e4ffe7..0000000000000000000000000000000000000000
--- a/hack/registry-port-forward.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env bash
-set -ue -o pipefail
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
-
-{
-until kubectl -n kube-system --timeout=30s rollout status deployment/registry-docker-registry >/dev/null 2>&1; do sleep 1; done
-old_pid=$(cat $SCRIPT_DIR/.reg-pf 2>/dev/null || true)
-if [[ -n "$old_pid" ]]; then
-  echo "killing old port-forward with PID $old_pid"
-  kill $old_pid || true
-fi
-
-nohup kubectl port-forward -n kube-system svc/registry-docker-registry 30666:5000 >$SCRIPT_DIR/.reg-pf.out 2>$SCRIPT_DIR/.reg-pf.err &
-} >&2
-
-echo $! > $SCRIPT_DIR/.reg-pf
diff --git a/kustomization.yaml b/kustomization.yaml
index cf5e63c09a36cd3e36ad9330f320a8a2cd733408..73de684e2698600388f373b51cd90278a484752b 100644
--- a/kustomization.yaml
+++ b/kustomization.yaml
@@ -1,4 +1,4 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
-- deploy/kubernetes/
+  - deploy/kubernetes
diff --git a/skaffold.yaml b/skaffold.yaml
index 549cadddc20b410cbf1addaa5ab106cc17d41df5..b9831baecf7e87ebe39b59148f8549683a6d129a 100644
--- a/skaffold.yaml
+++ b/skaffold.yaml
@@ -1,4 +1,4 @@
-apiVersion: skaffold/v4beta3
+apiVersion: skaffold/v4beta11
 kind: Config
 metadata:
   name: csi-driver
@@ -6,13 +6,12 @@ build:
   artifacts:
     - image: docker.io/hetznercloud/hcloud-csi-driver
       docker:
-        dockerfile: hack/Dockerfile
-        cacheFrom:
-          - docker.io/hetznercloud/hcloud-csi-driver:buildcache
+        dockerfile: dev/Dockerfile
   local:
     useBuildkit: true
   insecureRegistries:
     - localhost:30666
+
 manifests:
   helm:
     releases:
diff --git a/test/e2e/kubernetes/.gitignore b/test/e2e/kubernetes/.gitignore
index a459cf817dc755e96bf90ff7a198ebe0a0c2cbda..ba077a4031add5b3a04384f8b9cfc414efbf47dd 100644
--- a/test/e2e/kubernetes/.gitignore
+++ b/test/e2e/kubernetes/.gitignore
@@ -1 +1 @@
-test-binaries/
\ No newline at end of file
+bin
diff --git a/test/e2e/kubernetes/Makefile b/test/e2e/kubernetes/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..58479eb353111b3921f08857f507af2a934d078c
--- /dev/null
+++ b/test/e2e/kubernetes/Makefile
@@ -0,0 +1,31 @@
+SHELL = bash
+
+K8S_TEST_VERSION ?= v1.29.0
+
+GINKGO = bin/ginkgo
+GINKGO_ARGS = -v --flake-attempts=2
+
+E2E = bin/e2e.test
+E2E_ARGS = -storage.testdriver=$(CURDIR)/testdriver.yaml
+
+bin:
+	mkdir -p bin
+	curl -sSL "https://dl.k8s.io/$(K8S_TEST_VERSION)/kubernetes-test-linux-amd64.tar.gz" | \
+		tar --strip-components=3 -C bin -zxf - \
+			kubernetes/test/bin/e2e.test \
+			kubernetes/test/bin/ginkgo
+
+parallel: bin
+	$(GINKGO) $(GINKGO_ARGS) \
+		-nodes=12 \
+		-focus='External.Storage' \
+		-skip='\[Feature:|\[Disruptive\]|\[Serial\]' \
+		$(E2E) -- $(E2E_ARGS)
+
+serial: bin
+	$(GINKGO) $(GINKGO_ARGS) \
+		-focus='External.Storage.*(\[Feature:|\[Serial\])' \
+		-skip='\[Feature:SELinuxMountReadWriteOncePod\]' \
+		$(E2E) -- $(E2E_ARGS)
+
+test: parallel serial
diff --git a/test/e2e/kubernetes/run-e2e-tests.sh b/test/e2e/kubernetes/run-e2e-tests.sh
deleted file mode 100755
index 8db8edad3386dcaeb9c41b56d11846a02116c8b0..0000000000000000000000000000000000000000
--- a/test/e2e/kubernetes/run-e2e-tests.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-set -uex -o pipefail
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
-
-k8s_test_version="${K8S_TEST_VERSION:-v1.29.0}"
-
-mkdir -p "${SCRIPT_DIR}/test-binaries"
-# TODO: Read linux-amd64 from env
-curl --location "https://dl.k8s.io/${k8s_test_version}/kubernetes-test-linux-amd64.tar.gz" | \
-  tar --strip-components=3 -C "${SCRIPT_DIR}/test-binaries" -zxf - kubernetes/test/bin/e2e.test kubernetes/test/bin/ginkgo
-
-ginkgo="${SCRIPT_DIR}/test-binaries/ginkgo"
-ginkgo_flags="-v --flakeAttempts=2"
-
-e2e="${SCRIPT_DIR}/test-binaries/e2e.test"
-e2e_flags="-storage.testdriver=${SCRIPT_DIR}/testdriver.yaml"
-
-echo "Executing parallel tests"
-${ginkgo} ${ginkgo_flags} \
-  -nodes=6 \
-  -focus='External.Storage' \
-  -skip='\[Feature:|\[Disruptive\]|\[Serial\]' \
-  "${e2e}" -- ${e2e_flags}
-
-echo "Executing serial tests"
-${ginkgo} ${ginkgo_flags} \
-  -focus='External.Storage.*(\[Feature:|\[Serial\])' \
-  -skip='\[Feature:SELinuxMountReadWriteOncePod\]' \
-  "${e2e}" -- ${e2e_flags}
-