Skip to content
Snippets Groups Projects
Unverified Commit d56af681 authored by Mohamed S. Mahmoud's avatar Mohamed S. Mahmoud Committed by GitHub
Browse files

Add KinD deployment env for development (#277)

parent b3c02b8e
No related branches found
No related tags found
No related merge requests found
......@@ -160,6 +160,15 @@ tests-e2e: prereqs ## Run e2e tests
$(OCI_BIN) save -o ebpf-agent.tar localhost/ebpf-agent:test
GOOS=$(GOOS) go test -p 1 -timeout 30m -v -mod vendor -tags e2e ./e2e/...
.PHONY: create-and-deploy-kind-cluster
# Builds the agent image, creates a local KinD cluster, and deploys the agent
# DaemonSet into it; see scripts/kind-cluster.sh for the details.
create-and-deploy-kind-cluster: prereqs ## Create a kind cluster and deploy the agent.
	scripts/kind-cluster.sh
.PHONY: destroy-kind-cluster
# Removes the agent DaemonSet (best effort) and then tears the cluster down.
# kubectl is used instead of oc for consistency with scripts/kind-cluster.sh,
# and --ignore-not-found keeps the target idempotent if the agent is gone.
destroy-kind-cluster: ## Destroy the kind cluster.
	kubectl delete --ignore-not-found=true -f scripts/agent.yml
	kind delete cluster
##@ Images
# note: to build and push custom image tag use: IMAGE_ORG=myuser VERSION=dev s
......
......@@ -93,6 +93,22 @@ and whether they worked (✅) or did not (❌):
| Kind | 1.23.5 | ❌ | ✅ |
| OpenShift | 1.23.3 | ✅ | ✅ |
## Running on KinD cluster
### How to run on kind cluster
Install KinD, deploy the eBPF agent, and export `KUBECONFIG`:
```sh
make create-and-deploy-kind-cluster
export KUBECONFIG=$(pwd)/scripts/kubeconfig
```
### Deleting the kind cluster
In order to delete the kind cluster:
```sh
make destroy-kind-cluster
```
## Development recipes
......
# Namespace hosting the privileged eBPF agent workload.
apiVersion: v1
kind: Namespace
metadata:
  name: netobserv-privileged
---
# DaemonSet running the NetObserv eBPF agent on every node of the kind cluster.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: netobserv-ebpf-agent
  namespace: netobserv-privileged
  labels:
    k8s-app: netobserv-ebpf-agent
spec:
  selector:
    matchLabels:
      k8s-app: netobserv-ebpf-agent
  template:
    metadata:
      labels:
        k8s-app: netobserv-ebpf-agent
    spec:
      # Host networking so the agent can observe node-level traffic.
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: netobserv-ebpf-agent
        # Image is built locally and side-loaded with `kind load docker-image`.
        image: localhost/ebpf-agent:test
        securityContext:
          # Privileged + root are required to attach eBPF programs and to
          # access /sys/kernel/debug on the host.
          privileged: true
          runAsUser: 0
        env:
        - name: SAMPLING
          value: "1"
        - name: CACHE_ACTIVE_TIMEOUT
          value: 200ms
        - name: LOG_LEVEL
          value: debug
        # Flows are exported to a collector expected on the node's host IP.
        - name: FLOWS_TARGET_HOST
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        - name: FLOWS_TARGET_PORT
          value: "9999"
        - name: ENABLE_RTT
          value: "true"
        - name: ENABLE_PKT_DROPS
          value: "true"
        - name: ENABLE_DNS_TRACKING
          value: "true"
        volumeMounts:
        - name: bpf-kernel-debug
          mountPath: /sys/kernel/debug
          mountPropagation: Bidirectional
      volumes:
      - name: bpf-kernel-debug
        hostPath:
          path: /sys/kernel/debug
          type: Directory
#!/usr/bin/env bash
# Creates a local KinD cluster and deploys the NetObserv eBPF agent into it.
set -eux
# Absolute directory containing this script; the kubeconfig and agent.yml
# manifest live alongside it.
DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# Node image pinning the Kubernetes version the cluster runs.
KIND_IMAGE="kindest/node:v1.27.3"
# deploy_kind creates the KinD cluster (one control plane, two workers) using
# the networking settings exported by the caller (IP_FAMILY and the pod/service
# CIDRs), and writes the cluster kubeconfig next to this script.
# Fix: quote all expansions and honor KIND_CLUSTER_NAME, which was previously
# defined but never passed to `kind create cluster`.
deploy_kind() {
  cat <<EOF | kind create cluster --name "${KIND_CLUSTER_NAME:-kind}" --image "${KIND_IMAGE}" --config=- --kubeconfig="${DIR}/kubeconfig"
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
  podSubnet: $NET_CIDR_IPV4,$NET_CIDR_IPV6
  serviceSubnet: $SVC_CIDR_IPV4,$SVC_CIDR_IPV6
  ipFamily: $IP_FAMILY
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: ClusterConfiguration
    apiServer:
      extraArgs:
        v: "5"
    controllerManager:
      extraArgs:
        v: "5"
    scheduler:
      extraArgs:
        v: "5"
- role: worker
- role: worker
EOF
}
# install_netobserv-agent builds the agent image, side-loads it into the kind
# nodes (honoring KIND_CLUSTER_NAME, which the original ignored), and applies
# the DaemonSet manifest.
# Fix: quote ${DIR} so paths with spaces do not word-split.
install_netobserv-agent() {
  docker build . -t localhost/ebpf-agent:test
  kind load docker-image --name "${KIND_CLUSTER_NAME:-kind}" localhost/ebpf-agent:test
  kubectl apply -f "${DIR}/agent.yml"
}
# print_success emits a short post-install summary, temporarily disabling
# xtrace so the message is not interleaved with command traces.
print_success() {
  set +x
  printf '%s\n' \
    "Your kind cluster was created successfully" \
    "Run the following to load the kubeconfig:" \
    "export KUBECONFIG=${DIR}/kubeconfig"
  set -x
}
# Tunables — all overridable from the environment.
KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-kind}"
IP_FAMILY=${IP_FAMILY:-dual}
NET_CIDR_IPV4=${NET_CIDR_IPV4:-10.244.0.0/16}
SVC_CIDR_IPV4=${SVC_CIDR_IPV4:-10.96.0.0/16}
NET_CIDR_IPV6=${NET_CIDR_IPV6:-fd00:10:244::/48}
SVC_CIDR_IPV6=${SVC_CIDR_IPV6:-fd00:10:96::/112}

# At the minimum, deploy the kind cluster
deploy_kind
export KUBECONFIG="${DIR}/kubeconfig"
# Label the workers with the conventional worker role so selectors on
# node-role.kubernetes.io/worker match. kubectl (not oc) keeps this script
# usable without the OpenShift CLI, consistent with install_netobserv-agent,
# and node names derive from the cluster name instead of being hard-coded.
kubectl label node "${KIND_CLUSTER_NAME}-worker" node-role.kubernetes.io/worker=
kubectl label node "${KIND_CLUSTER_NAME}-worker2" node-role.kubernetes.io/worker=
install_netobserv-agent
# Print success at the end of this script
print_success
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment