2023 CKS 真题

1. kube-bench 修复不安全项

1

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
# 【!!!】到指定节点

$ vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --authorization-mode=Node,RBAC
$ vim /etc/kubernetes/manifests/etcd.yaml
- --client-cert-auth=true
$ vim /var/lib/kubelet/config.yaml
authentication:
anonymous:
enabled: false # *
webhook:
enabled: true # *
authorization:
mode: Webhook # *
$ systemctl daemon-reload
$ systemctl restart kubelet
$ kubectl get pod -A # 检查

2. Pod 指定 ServiceAccount

2

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
# 搜`serviceaccount` --> 为Pod配置服务账号

$ vim 2.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: backend-sa
namespace: qa # *
automountServiceAccountToken: false # *
$ kubectl apply -f 2.yaml
$ kubectl get sa -n qa # 检查
$ vim /cks/sa/pod1.yaml
apiVersion: v1
kind: Pod
metadata:
name: backend
namespace: qa
spec:
serviceAccountName: backend-sa # *
containers:
- image: nginx:alpine
imagePullPolicy: IfNotPresent
name: backend
$ kubectl apply -f /cks/sa/pod1.yaml
$ kubectl get pod -n qa # 检查
$ kubectl get pod -n qa -o yaml | grep -i serviceaccountname
$ kubectl delete sa -n qa test01

3. 默认网络策略

3

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
# 搜`networkpolicy` --> 网络策略

$ vim /cks/net/p1.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: denypolicy
namespace: testing
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
$ kubectl apply -f /cks/net/p1.yaml
$ kubectl -n testing describe netpol denypolicy # 检查
Name: denypolicy
Namespace: testing
Created on: 2022-12-28 15:18:22 +0800 CST
Labels: <none>
Annotations: <none>
Spec:
PodSelector: <none> (Allowing the specific traffic to all pods in this namespace)
Allowing ingress traffic:
<none> (Selected pods are isolated for ingress connectivity)
Allowing egress traffic:
<none> (Selected pods are isolated for egress connectivity)
Policy Types: Ingress, Egress

4. RBAC - RoleBinding

4

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
# 搜`RBAC` --> 使用RBAC鉴权

$ kubectl -n db edit role role-1
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: role-1
namespace: db
rules:
- apiGroups:
- ""
resources:
- services # *
verbs:
- get # *
$ kubectl create role -h # 查看命令帮助
$ kubectl -n db create role role-2 --resource=namespaces --verb=delete
$ kubectl create rolebinding -h # 查看命令帮助
$ kubectl -n db create rolebinding role-2-binding --role=role-2 --serviceaccount=db:service-account-web
$ kubectl describe role -n db # 检查
Name: role-1
Labels: <none>
Annotations: <none>
PolicyRule:
Resources Non-Resource URLs Resource Names Verbs
--------- ----------------- -------------- -----
services [] [] [get]

Name: role-2
Labels: <none>
Annotations: <none>
PolicyRule:
Resources Non-Resource URLs Resource Names Verbs
--------- ----------------- -------------- -----
namespaces [] [] [delete]
$ kubectl describe rolebinding -n db # 检查
Name: role-2-binding
Labels: <none>
Annotations: <none>
Role:
Kind: Role
Name: role-2
Subjects:
Kind Name Namespace
---- ---- ---------
ServiceAccount service-account-web db

5. 日志审计 log audit

5

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
# 【!!!】到指定节点
# 搜`audit` --> 审计(粘贴`roles`)

$ vim /etc/kubernetes/logpolicy/sample-policy.yaml
apiVersion: audit.k8s.io/v1
kind: Policy
omitStages:
- "RequestReceived"
rules:
- level: RequestResponse
resources:
- group: ""
resources: ["persistentvolumes"] # *

- level: Request
resources:
- group: ""
resources: ["configmaps"]
namespaces: ["front-apps"] # *

- level: Metadata
resources:
- group: ""
resources: ["secrets", "configmaps"]

- level: Metadata
omitStages:
- "RequestReceived"
$ vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --audit-log-path=/var/log/kubernetes/audit-log.txt
- --audit-log-maxage=10
- --audit-log-maxbackup=2
- --audit-policy-file=/etc/kubernetes/logpolicy/sample-policy.yaml
$ systemctl daemon-reload
$ systemctl restart kubelet
$ kubectl get pod -A # 检查
$ tail -f /var/log/kubernetes/audit-log.txt # 检查

6. 创建Secret

6

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
# 搜`secret` --> Secret

$ kubectl -n istio-system get secrets db1-test -o yaml
apiVersion: v1
data:
password: aGVsbG8=
username: ZGIx
kind: Secret
metadata:
name: db1-test
namespace: istio-system
$ echo "ZGIx" | base64 -d > /cks/sec/user.txt
$ echo "aGVsbG8=" | base64 -d > /cks/sec/pass.txt
$ cat /cks/sec/user.txt # 检查
db1
$ cat /cks/sec/pass.txt # 检查
hello
$ kubectl create secret -h # 查看命令帮助
$ kubectl -n istio-system create secret generic db2-test --from-literal=username=production-instance --from-literal=password='KvLftKgs4aVH'
$ vim 6.yaml
apiVersion: v1
kind: Pod
metadata:
name: secret-pod
namespace: istio-system
spec:
containers:
- name: dev-container
image: nginx
volumeMounts:
- name: secret-volume
mountPath: "/etc/secret"
#readOnly: true
volumes:
- name: secret-volume
secret:
secretName: db2-test
#optional: true
$ kubectl apply -f 6.yaml

7. Dockerfile 检测

7

1
2
3
4
5
6
7
8
$ vim /cks/docker/Dockerfile
#USER root
USER nobody
#FROM ubuntu:last
FROM ubuntu:16.04
$ vim /cks/docker/deployment.yaml
#看label是否对应
#修改securityContext:①删除SYS_ADMIN;②privileged为False;③readonly为True

8. 沙箱运行容器 gVisor

8

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
# 搜`runtimeclass` --> 容器运行时类

$ vim /cks/gVisor/rc.yaml
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
name: untrusted
handler: runsc # *
$ kubectl apply -f /cks/gVisor/rc.yaml
$ kubectl -n server edit deploy nginx-host # 多个
spec:
template:
spec:
runtimeClassName: untrusted # *
containers:
- ...
$ kubectl get pod -n server # 检查

9. 网络策略 NetworkPolicy

9

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
# 搜`networkpolicy` --> 网络策略

$ kubectl get pod -n dev-team --show-labels
$ kubectl get ns qaqa --show-labels
$ vim /cks/net/po.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: pod-restriction
namespace: dev-team
spec:
podSelector:
matchLabels:
environment: testing # *
policyTypes:
- Ingress
ingress:
- from:
- namespaceSelector:
matchLabels:
name: qaqa # *
- from:
- namespaceSelector: {} # 任意namespace
podSelector: # 删除`-`
matchLabels:
environment: testing # *
$ kubectl apply -f /cks/net/po.yaml
$ kubectl -n dev-team describe networkpolicy pod-restriction
Name: pod-restriction
Namespace: dev-team
Created on: 2022-12-29 19:56:59 +0800 CST
Labels: <none>
Annotations: <none>
Spec:
PodSelector: environment=testing
Allowing ingress traffic:
To Port: <any> (traffic allowed to all ports)
From:
NamespaceSelector: name=qaqa
----------
To Port: <any> (traffic allowed to all ports)
From:
NamespaceSelector: <none>
PodSelector: environment=testing
Not affecting egress traffic
Policy Types: Ingress

10. Trivy 扫描镜像安全漏洞

10

1
2
3
4
5
6
7
8
# 【!!!】到指定节点
# Reference --> Command line tool --> kubectl Cheat Sheet --> Formatting output

$ kubectl get pods -n kamino --output=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image"
$ trivy image -h # 查看命令帮助
$ trivy image --severity HIGH,CRITICAL docker.io/library/nginx:1.13
#$ for i in {aaa:1,bbb:2,ccc:3};do trivy image -s "HIGH,CRITICAL" $i >> 10.txt;done
$ kubectl delete pod xxx --force

11. AppArmor

11

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
# 搜`apparmor` --> 使用AppArmor限制容器对资源的访问

# 【!!!】到指定节点
$ cd /etc/apparmor.d/
$ apparmor_status | grep nginx
$ apparmor_parser nginx_apparmor
$ apparmor_status | grep nginx
nginx-profile-3

# 【!!!】退回到node1
$ vim /cks/KSSH00401/nginx-deploy.yaml
metadata:
name: podx
annotations:
#container.apparmor.security.beta.kubernetes.io/<容器名>: localhost/<策略>
container.apparmor.security.beta.kubernetes.io/podx: localhost/nginx-profile-3
$ kubectl apply -f /cks/KSSH00401/nginx-deploy.yaml
$ kubectl get pod # 检查

12. Sysdig & Falco

12

1
2
3
4
5
6
7
8
9
10
11
# 【!!!】到指定节点

$ vim /etc/falco/falco_rules.local.yaml
- rule: rule1
desc: rule1
condition: container.name = "redis123"
output: "%evt.time,%user.name,%proc.name"
#output: "%evt.time,%user.uid,%proc.name" # 第二次
priority: WARNING
$ sudo falco -M 30 -r /etc/falco/falco_rules.local.yaml >> /opt/KSR00101/incidents/summary # 两次
$ cat /opt/KSR00101/incidents/summary # 检查

13. Container 安全上下文

13

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
# 搜`context` --> 为Pod或容器配置安全上下文

$ kubectl -n sec-ns edit deploy secdep
...
spec:
securityContext: # 任务1
runAsUser: 30000
...
containers:
- name: demo-1
securityContext:
allowPrivilegeEscalation: false # 任务2
readOnlyRootFilesystem: true # 任务3
...
- name: demo-1
securityContext: # 如果有多个容器,每个容器都要增加这三行
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
$ kubectl -n sec-ns get deploy # 检查

14. 启用 API Server 认证

14

1
2
3
4
5
6
7
8
9
10
# 【!!!】到指定节点

$ vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --authorization-mode=Node,RBAC
- --enable-admission-plugins=NodeRestriction
$ systemctl daemon-reload
$ systemctl restart kubelet
$ kubectl get pod -A # 检查
$ kubectl delete clusterrolebinding system:anonymous
$ kubectl get pod -A --kubeconfig=/etc/kubernetes/admin.conf # 检查

15. TLS 通信加强

15

1
2
3
4
5
6
7
8
9
10
11
# 【!!!】到指定节点
# 搜`kube-apiserver` --> kube-apiserver

$ vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --tls-min-version=VersionTLS13
- --tls-cipher-suites=TLS_AES_128_GCM_SHA256
$ vim /etc/kubernetes/manifests/etcd.yaml
- --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
$ systemctl daemon-reload
$ systemctl restart kubelet
$ kubectl get pod -A # 检查

16. ImagePolicyWebhook 容器镜像扫描

16

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
# 【!!!】到指定节点
# 搜`imagepolicywebhook` --> 准入控制器参考

$ vim /etc/kubernetes/epconfig/admission_configuration.json
"defaultAllow": true # 改为false
$ vim /etc/kubernetes/epconfig/kubeconfig.yml
clusters:
- cluster:
certificate-authority: /etc/kubernetes/epconfig/server.crt
server: https://image...svc:1323/image_policy # *
name: bouncer_webhook
$ vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --enable-admission-plugins=NodeRestriction,ImagePolicyWebhook # 添加
- --admission-control-config-file=/etc/kubernetes/epconfig/admission_configuration.json # *
$ systemctl daemon-reload
$ systemctl restart kubelet
$ kubectl apply -f /cks/img/web1.yaml # 检查