diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 1e258ae13898f0e5e8496008aec728da3fca8acb..f020c10d7b58afa9a13b206e2a26207ceeb050f8 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -99,23 +99,9 @@ jobs:
           image: ${{ matrix.kind-image }}
           wait: 10s # Without default CNI, control-plane doesn't get ready until Cilium is installed
           config: .github/workflows/kind/config.yml
-      - name: Setup Helm
-        uses: azure/setup-helm@v3.5
-      - name: Install Cilium
+      - name: Install kube-router for NetworkPolicy support
         run: |
-          helm repo add cilium https://helm.cilium.io/
-          helm install cilium cilium/cilium --version 1.9.13 \
-            --namespace kube-system \
-            --set nodeinit.enabled=true \
-            --set kubeProxyReplacement=partial \
-            --set hostServices.enabled=false \
-            --set externalIPs.enabled=true \
-            --set nodePort.enabled=true \
-            --set hostPort.enabled=true \
-            --set bpf.masquerade=false \
-            --set image.pullPolicy=IfNotPresent \
-            --set ipam.mode=kubernetes \
-            --set operator.replicas=1
+          kubectl apply -f .github/workflows/kind/kube-router.yaml
       - name: Wait for cluster to finish bootstraping
         run: kubectl wait --for=condition=Ready pods --all --all-namespaces --timeout=300s
       - name: Create kube-prometheus stack
diff --git a/.github/workflows/kind/config.yml b/.github/workflows/kind/config.yml
index e0ac61d536534f3bb994c336cc5f7a84baad239b..204a5a59232288b77050af2a6ea721aef416a60d 100644
--- a/.github/workflows/kind/config.yml
+++ b/.github/workflows/kind/config.yml
@@ -1,6 +1,15 @@
 kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
 networking:
-  disableDefaultCNI: true
   podSubnet: "10.10.0.0/16"
   serviceSubnet: "10.11.0.0/16"
+nodes:
+  - role: control-plane
+    extraMounts:
+      - hostPath: /home/runner/work/kube-prometheus/kube-prometheus/.github/workflows/kind/patches
+        containerPath: /patches
+kubeadmConfigPatches:
+  - |
+    kind: InitConfiguration
+    patches:
+      directory: /patches
diff --git a/.github/workflows/kind/kube-router.yaml b/.github/workflows/kind/kube-router.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b63ea7e31ab42df8c0754b38d691acee5c1d9f15
--- /dev/null
+++ b/.github/workflows/kind/kube-router.yaml
@@ -0,0 +1,186 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kube-router-cfg
+  namespace: kube-system
+  labels:
+    tier: node
+    k8s-app: kube-router
+data:
+  cni-conf.json: |
+    {
+      "cniVersion":"0.3.0",
+      "name":"mynet",
+      "plugins":[
+        {
+          "name":"kubernetes",
+          "type":"bridge",
+          "bridge":"kube-bridge",
+          "isDefaultGateway":true,
+          "ipam":{
+            "type":"host-local"
+          }
+        }
+      ]
+    }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    k8s-app: kube-router
+    tier: node
+  name: kube-router
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      k8s-app: kube-router
+      tier: node
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-router
+        tier: node
+    spec:
+      priorityClassName: system-node-critical
+      serviceAccountName: kube-router
+      containers:
+      - name: kube-router
+        image: docker.io/cloudnativelabs/kube-router
+        imagePullPolicy: Always
+        args:
+        - --run-router=true
+        - --run-firewall=true
+        - --run-service-proxy=false
+        - --bgp-graceful-restart=true
+        env:
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: KUBE_ROUTER_CNI_CONF_FILE
+          value: /etc/cni/net.d/10-kuberouter.conflist
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 20244
+          initialDelaySeconds: 10
+          periodSeconds: 3
+        resources:
+          requests:
+            memory: 250Mi
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - name: lib-modules
+          mountPath: /lib/modules
+          readOnly: true
+        - name: cni-conf-dir
+          mountPath: /etc/cni/net.d
+        - name: kubeconfig
+          mountPath: /var/lib/kube-router/kubeconfig
+          readOnly: true
+        - name: xtables-lock
+          mountPath: /run/xtables.lock
+          readOnly: false
+      initContainers:
+      - name: install-cni
+        image: docker.io/cloudnativelabs/kube-router
+        imagePullPolicy: Always
+        command:
+        - /bin/sh
+        - -c
+        - set -e -x;
+          if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
+            if [ -f /etc/cni/net.d/*.conf ]; then
+              rm -f /etc/cni/net.d/*.conf;
+            fi;
+            TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
+            cp /etc/kube-router/cni-conf.json ${TMP};
+            mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
+          fi
+        volumeMounts:
+        - mountPath: /etc/cni/net.d
+          name: cni-conf-dir
+        - mountPath: /etc/kube-router
+          name: kube-router-cfg
+      hostNetwork: true
+      tolerations:
+      - effect: NoSchedule
+        operator: Exists
+      - key: CriticalAddonsOnly
+        operator: Exists
+      - effect: NoExecute
+        operator: Exists
+      volumes:
+      - name: lib-modules
+        hostPath:
+          path: /lib/modules
+      - name: cni-conf-dir
+        hostPath:
+          path: /etc/cni/net.d
+      - name: kube-router-cfg
+        configMap:
+          name: kube-router-cfg
+      - name: kubeconfig
+        hostPath:
+          path: /var/lib/kube-router/kubeconfig
+      - name: xtables-lock
+        hostPath:
+          path: /run/xtables.lock
+          type: FileOrCreate
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-router
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kube-router
+  namespace: kube-system
+rules:
+  - apiGroups:
+    - ""
+    resources:
+      - namespaces
+      - pods
+      - services
+      - nodes
+      - endpoints
+    verbs:
+      - list
+      - get
+      - watch
+  - apiGroups:
+    - "networking.k8s.io"
+    resources:
+      - networkpolicies
+    verbs:
+      - list
+      - get
+      - watch
+  - apiGroups:
+    - extensions
+    resources:
+      - networkpolicies
+    verbs:
+      - get
+      - list
+      - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kube-router
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kube-router
+subjects:
+- kind: ServiceAccount
+  name: kube-router
+  namespace: kube-system
diff --git a/.github/workflows/kind/patches/kube-controller-manager.yaml b/.github/workflows/kind/patches/kube-controller-manager.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fb41faf1a878a050ae5830dae30359fedeb21a9a
--- /dev/null
+++ b/.github/workflows/kind/patches/kube-controller-manager.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kube-controller-manager
+  namespace: kube-system
+spec:
+  containers:
+  - name: kube-controller-manager
+    resources:
+      requests:
+        cpu: 1m