We have launched a special series to help you prepare for the Kubernetes CKS certification. Check out the Certified Kubernetes Security Specialist Challenge Series, where you can put all your hardcore Kubernetes skills to the test.
This is a companion discussion topic for the original entry at https://kodekloud.com/blog/cks-challenges/
Install the ‘falco’ utility on the controlplane node and start it as a systemd service
I followed the installation steps at https://falco.org/docs/install-operate/installation/:
curl -s https://falco.org/repo/falcosecurity-packages.asc | apt-key add -
echo "deb https://download.falco.org/packages/deb stable main" | tee -a /etc/apt/sources.list.d/falcosecurity.list
apt-get update -y
apt-get install -y dkms make linux-headers-$(uname -r)
# If you use falco-driver-loader to build the BPF probe locally, you also need the clang toolchain
apt-get install -y clang llvm
apt-get install -y falco
But it is not working: the falco service is not loading. Kindly let me know whether I am following the correct link.
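If the packages installed cleanly, the next thing to look at is why the unit fails to start. A minimal diagnostic, assuming the package registered a systemd unit named falco:

# show the unit's current state and the most recent log lines
systemctl status falco
journalctl -u falco --no-pager -n 50

# a failed kernel-driver build is a common cause; confirm the headers match the running kernel
uname -r
dpkg -l | grep linux-headers

If the driver failed to build, re-running falco-driver-loader usually prints the underlying compiler or kmod error.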
- Create a single rule in the audit policy that will record events for the ‘two’ objects depicting abnormal behaviour in the ‘citadel’ namespace. This rule should however be applied to all ‘three’ namespaces shown in the diagram at a ‘metadata’ level. Omit the ‘RequestReceived’ stage.
The above task alone is failing, even though I have created everything correctly. I have tried 3 or 4 times and it is not working. Can someone please help me?
root@controlplane /etc/kubernetes ➜ cat audit-policy.yaml
apiVersion: audit.k8s.io/v1
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  - level: Metadata
    resources:
      - resources: ["pods", "configmaps"]
    namespaces: ["omega", "citadel", "eden-prime"]
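Before pointing the API server at this file, a quick syntax check is worthwhile, since kube-apiserver will exit at startup if the policy file does not parse. A rough check, assuming python3 with PyYAML is available on the node:

python3 -c 'import yaml; yaml.safe_load(open("/etc/kubernetes/audit-policy.yaml")); print("syntax OK")'

Note that the smart quotes forum software often inserts (“ ”) will break YAML parsing; the file itself must use plain ASCII double quotes.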
1.2. Now we can update the kube-apiserver.yaml manifest to use the above policy file and write the audit log (the two --audit-* flags and their volume mounts below).
root@controlplane /etc/kubernetes ➜ cd manifests/
root@controlplane /etc/kubernetes/manifests ➜ vi kube-apiserver.yaml
root@controlplane /etc/kubernetes/manifests ➜ cat kube-apiserver.yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.121.249:6443
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=192.168.121.249
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-issuer=https://kubernetes.default.svc.cluster.local
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=10.96.0.0/12
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    - --audit-policy-file=/etc/kubernetes/audit-policy.yaml
    - --audit-log-path=/var/log/kubernetes/audit/audit.log
    image: k8s.gcr.io/kube-apiserver:v1.23.0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 192.168.121.249
        path: /livez
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-apiserver
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 192.168.121.249
        path: /readyz
        port: 6443
        scheme: HTTPS
      periodSeconds: 1
      timeoutSeconds: 15
    resources:
      requests:
        cpu: 250m
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 192.168.121.249
        path: /livez
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/ca-certificates
      name: etc-ca-certificates
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /usr/local/share/ca-certificates
      name: usr-local-share-ca-certificates
      readOnly: true
    - mountPath: /usr/share/ca-certificates
      name: usr-share-ca-certificates
      readOnly: true
    - mountPath: /etc/kubernetes/audit-policy.yaml
      name: audit
      readOnly: true
    - mountPath: /var/log/kubernetes/audit/
      name: audit-log
      readOnly: false
  hostNetwork: true
  priorityClassName: system-node-critical
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/ca-certificates
      type: DirectoryOrCreate
    name: etc-ca-certificates
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath:
      path: /usr/local/share/ca-certificates
      type: DirectoryOrCreate
    name: usr-local-share-ca-certificates
  - hostPath:
      path: /usr/share/ca-certificates
      type: DirectoryOrCreate
    name: usr-share-ca-certificates
  - name: audit
    hostPath:
      path: /etc/kubernetes/audit-policy.yaml
      type: File
  - name: audit-log
    hostPath:
      path: /var/log/kubernetes/audit/
      type: DirectoryOrCreate
status: {}
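After saving the manifest, the kubelet recreates the kube-apiserver static pod automatically. A rough way to verify the audit pipeline end to end, assuming crictl is configured on the controlplane node:

# confirm the restarted apiserver container is running
crictl ps | grep kube-apiserver

# generate some activity in one of the watched namespaces
kubectl get pods -n citadel

# the audit log should now contain Metadata-level events for that namespace
grep '"namespace":"citadel"' /var/log/kubernetes/audit/audit.log | tail -n 5

If the apiserver never comes back, check its container logs with crictl logs; a typo in the --audit-* flags or a missing volume mount is the usual cause.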