Skip to content

Commit e96d2b3

Browse files
committed
build: Add E2E tests
Add new E2E tests that ensure cis-operator mechanics are working as intended. Note that this does not aim to confirm test accuracy, as that is security-scan's responsibility. Signed-off-by: Paulo Gomes <[email protected]>
1 parent e9aba3e commit e96d2b3

File tree

8 files changed

+478
-2
lines changed

8 files changed

+478
-2
lines changed

Dockerfile.dapper

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,26 @@
11
FROM registry.suse.com/bci/golang:1.19
22

3+
# k3d and kubectl versions must be aligned with the Kubernetes versions
4+
# set in tests/k3s-bench-test.yaml.
5+
# k3d is used for e2e tests and is not shipped on the final image.
6+
ARG K3D_VERSION=5.4.8
7+
ARG KUBERNETES_VERSION=1.28.0
8+
9+
ENV GOLANGCI_LINT v1.51.2
10+
311
ARG DAPPER_HOST_ARCH
412
ENV ARCH $DAPPER_HOST_ARCH
5-
ENV GOLANGCI_LINT v1.51.2
613

7-
RUN zypper -n install git docker vim less file curl wget
14+
RUN zypper -n install git docker vim less file curl wget awk jq
815
RUN if [[ "${ARCH}" == "amd64" ]]; then \
916
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s ${GOLANGCI_LINT}; \
1017
fi
1118

19+
RUN curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG="v${K3D_VERSION}" bash
20+
ADD --chown=root:root --chmod=755 \
21+
"https://dl.k8s.io/release/v${KUBERNETES_VERSION}/bin/linux/${ARCH}/kubectl" \
22+
/usr/local/bin/kubectl
23+
1224
ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS
1325
ENV DAPPER_SOURCE /go/src/github.com/rancher/cis-operator/
1426
ENV DAPPER_OUTPUT ./bin ./dist

package/Dockerfile

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,11 @@
11
FROM registry.suse.com/bci/bci-busybox:15.5
22

33
COPY pkg/ pkg/
4+
5+
# Ensure 65535 can access the templates in
6+
# pkg/securityscan/core/templates
7+
RUN chmod -R +xr pkg/
8+
49
COPY bin/cis-operator /usr/bin/
510

611
USER 65535:65535

scripts/ci

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,3 +8,4 @@ cd $(dirname $0)
88
./validate
99
./validate-ci
1010
./package
11+
./e2e

scripts/e2e

Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,138 @@
1+
#!/bin/bash
2+
set -eoux pipefail
3+
4+
export ARCH="${ARCH:-amd64}"
5+
export IMAGE=cis-operator:e2e
6+
7+
# TODO: dynamically find images required and preload them into k3d.
8+
export SECURITY_SCAN_IMAGE=rancher/security-scan:v0.2.13
9+
export SONOBUOY_IMAGE=rancher/mirrored-sonobuoy-sonobuoy:v0.56.16
10+
export COREDNS_IMAGE=rancher/mirrored-coredns-coredns:1.9.4
11+
export HELM_IMAGE=rancher/klipper-helm:v0.7.4-build20221121
12+
13+
CLUSTER_NAME="cis-op-e2e-${RANDOM}"
14+
E2E_TIMEOUT_SECONDS=200
15+
16+
CANCELLING=""
17+
NETWORK_ID=""
18+
CURRENT_CONTAINER=$(cat /etc/hostname)
19+
20+
function cleanup() {
21+
CANCELLING="true"
22+
echo "Cleaning up clusters..."
23+
24+
docker stop "k3d-${CLUSTER_NAME}-server-0"
25+
docker network disconnect "${NETWORK_ID}" "${CURRENT_CONTAINER}"
26+
docker network rm -f "k3d-${CLUSTER_NAME}"
27+
}
28+
trap cleanup EXIT
29+
30+
function pull_image() {
31+
EXTERNAL_IMAGE=$1
32+
echo "> Pull and import ${EXTERNAL_IMAGE} into cluster"
33+
docker pull "${EXTERNAL_IMAGE}"
34+
k3d image import "${EXTERNAL_IMAGE}" -c "${CLUSTER_NAME}"
35+
}
36+
37+
function dump_logs() {
38+
kubectl get pods -n cis-operator-system --show-labels
39+
echo "RUNNER LOGS:"
40+
kubectl logs -n cis-operator-system -l app.kubernetes.io/instance=security-scan-runner-k3s-e2e-scan || true
41+
echo "SONOBUOY LOGS (rancher-kube-bench):"
42+
kubectl logs -n cis-operator-system -l component=sonobuoy -c rancher-kube-bench || true
43+
echo "SONOBUOY LOGS (sonobuoy-worker):"
44+
kubectl logs -n cis-operator-system -l component=sonobuoy -c sonobuoy-worker || true
45+
}
46+
47+
cd $(dirname $0)/..
48+
49+
echo "Running E2E tests"
50+
sleep "${E2E_TIMEOUT_SECONDS}" && cleanup | false &
51+
52+
docker build -t local-k3s -f tests/Dockerfile.k3s tests
53+
54+
echo "> Spinning up k3d cluster"
55+
# After a few executions k3d can have problems with evictions:
56+
# https://k3d.io/v5.0.1/faq/faq/#pods-evicted-due-to-lack-of-disk-space
57+
k3d cluster create "${CLUSTER_NAME}" --no-lb --kubeconfig-update-default --image local-k3s \
58+
--k3s-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%@server:0' \
59+
--k3s-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%@server:0'
60+
61+
# Build image and import it into k3d.
62+
echo "> Build and load ${IMAGE} into cluster"
63+
docker build --build-arg ARCH -f package/Dockerfile -t "${IMAGE}" .
64+
k3d image import "${IMAGE}" -c "${CLUSTER_NAME}"
65+
66+
pull_image "${SECURITY_SCAN_IMAGE}"
67+
pull_image "${SONOBUOY_IMAGE}"
68+
pull_image "${COREDNS_IMAGE}"
69+
pull_image "${HELM_IMAGE}"
70+
71+
# Dapper will run on an isolated docker network.
72+
# To access k3d, grab the current container and connect it to k3d's network.
73+
NETWORK_ID=$(docker network ls -f name="k3d-${CLUSTER_NAME}" -q)
74+
docker network connect "${NETWORK_ID}" "${CURRENT_CONTAINER}"
75+
SERVER_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{println .IPAddress}}{{end}}' "k3d-${CLUSTER_NAME}-server-0" | head -n1)
76+
77+
# k3d's kubeconfig must be updated to the actual container IP.
78+
echo "> Update server to ${SERVER_IP}"
79+
kubectl config set-cluster "k3d-${CLUSTER_NAME}" --server="https://${SERVER_IP}:6443"
80+
81+
# cis-operator may have intermittent issues if key components
82+
# from the cluster aren't ready.
83+
echo "> Wait for k3d base components to be ready"
84+
kubectl wait node "k3d-${CLUSTER_NAME}-server-0" --for=condition=ready --timeout=45s
85+
kubectl wait --timeout=60s --for=condition=ready -n kube-system pod -l app=local-path-provisioner
86+
kubectl wait --timeout=60s --for=condition=ready -n kube-system pod -l k8s-app=kube-dns
87+
88+
echo "> Deploying cis-operator"
89+
kubectl apply -f ./crds
90+
kubectl apply -f ./tests/deploy.yaml
91+
92+
echo "> Wait for cis-operator to be ready"
93+
# Can't kubectl wait before the deployment schedules the pod, so
94+
# wait 10 seconds for that to happen first.
95+
sleep 10s
96+
kubectl wait --for=condition=ready -n cis-operator-system pod -l cis.cattle.io/operator=cis-operator --timeout=30s
97+
98+
echo "> Create ClusterScan"
99+
kubectl apply -f tests/k3s-bench-test.yaml
100+
101+
docker exec "k3d-${CLUSTER_NAME}-server-0" /usr/local/bin/kube-apiserver &
102+
103+
# Keep trying to check if the ClusterScan had some tests that passed —
104+
# that is a good enough indication that all the mechanics of cis-operator
105+
# are working as expected.
106+
#
107+
# As soon as passing tests are detected, exit the e2e. If none is found,
108+
# the tests will eventually timeout based on E2E_TIMEOUT_SECONDS.
109+
while (true)
110+
do
111+
if [ -n "${CANCELLING}" ]; then
112+
break
113+
fi
114+
115+
json=$(kubectl get ClusterScan k3s-e2e-scan -o jsonpath='{.status.summary}')
116+
if [ -n "${json}" ]; then
117+
passed=$(echo "${json}" | jq '.pass')
118+
total=$(echo "${json}" | jq '.total')
119+
fail=$(echo "${json}" | jq '.fail')
120+
121+
if [ "${passed}" -gt "0" ]; then
122+
echo "> cis-operator worked successfully"
123+
124+
kubectl get ClusterScan -o yaml
125+
echo "${json}" | jq .
126+
127+
exit 0
128+
fi
129+
130+
if [ "${total}" == "${fail}" ]; then
131+
echo "ERR: ALL TESTS FAILED!"
132+
exit 1
133+
fi
134+
fi
135+
136+
dump_logs
137+
sleep 2s
138+
done

tests/Dockerfile.k3s

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
# This image is solely used for testing purposes
2+
# and aims to wrap around k3s, making any needed
3+
# changes for the cis-operator tests to work.
4+
FROM rancher/k3s:v1.25.6-k3s1
5+
6+
# Upstream does not have files /etc/passwd nor /etc/group
7+
# which causes cis-operator to fail when scheduling a
8+
# run that maps those files from the "host".
9+
RUN echo "root:!:0:0::/:/bin/false" > /etc/passwd && \
10+
touch /etc/group
11+
12+
# A fake apiserver to trigger the if condition within
13+
# security-scan that runs kube-bench for the api-server.
14+
COPY kube-apiserver /usr/local/bin/kube-apiserver

0 commit comments

Comments
 (0)