###########################################################
## This file has the configuration for
## - A namespace called 0-danger-zone (for easy sorting and context)
## - A hostPath PersistentVolume
## - A PersistentVolumeClaim to use the PersistentVolume
## - A pod with the host root file system mounted at /mnt/hostfs
##
## This is useful for exploring the host file system in various
## debugging scenarios, such as examining the kubelet configuration.
## USE WITH CAUTION! Changes here can break your node!
##
## Deploy:          kubectl apply -f podWithHostFS.yaml
## Terminal access: kubectl exec -n 0-danger-zone -it pod-with-host-fs -- bash
## Remove:          kubectl delete -f podWithHostFS.yaml
###########################################################
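## Once inside the pod, the host root file system is available under
## /mnt/hostfs. A couple of example commands (the kubelet config path shown
## is a common default and may differ on your distribution):
##   cat /mnt/hostfs/var/lib/kubelet/config.yaml   # inspect the kubelet configuration
##   chroot /mnt/hostfs bash                       # open a shell rooted in the host file system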
apiVersion: v1
kind: Namespace
metadata:
  name: 0-danger-zone
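## Optional: make this namespace the default for your current kubectl
## context while debugging (plain kubectl shown; adapt to your own tooling):
##   kubectl config set-context --current --namespace 0-danger-zone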
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-host-fs
  namespace: 0-danger-zone
spec:
  containers:
    - name: container-with-host-fs
      image: eldada.jfrog.io/docker/ubuntu:22.04
      command:
        - 'bash'
        - '-c'
        - >
          echo "############# Setting up some tools...";
          apt update && apt install vim curl -y;
          echo -e "\n\n############# Get scripts...";
          cd /opt;
          curl -# -O https://raw.githubusercontent.com/eldada/command-examples/master/scripts/top.sh;
          curl -# -O https://raw.githubusercontent.com/eldada/command-examples/master/scripts/procInfo.sh;
          chmod +x *.sh;
          ls -la;
          echo -e "\n\n############# Going to sleep for 30 days...";
          sleep 30d
      resources:
        limits:
          cpu: 200m
          memory: 200Mi
        requests:
          cpu: 5m
          memory: 50Mi
      ## Privileged mode is used so access to the host file system is unrestricted
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /mnt/hostfs
          name: hostpath-privileged
  securityContext: {}
  volumes:
    - name: hostpath-privileged
      persistentVolumeClaim:
        claimName: host-pvc-volume
  ## Uncomment to apply node affinity when you need to force the pod to start on a given node.
  ## Replace NODE_HOST_NAME with the value of the label "kubernetes.io/hostname" on the node.
  # affinity:
  #   nodeAffinity:
  #     requiredDuringSchedulingIgnoredDuringExecution:
  #       nodeSelectorTerms:
  #         - matchExpressions:
  #             - key: kubernetes.io/hostname
  #               operator: In
  #               values:
  #                 - NODE_HOST_NAME
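  ## To list the hostname label values of your nodes:
  ##   kubectl get nodes -L kubernetes.io/hostname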
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: host-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: host-pvc-volume
  namespace: 0-danger-zone
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: manual
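## After deploying, verify that the claim is bound and the pod is running:
##   kubectl get pv host-pv-volume
##   kubectl get pvc,pod -n 0-danger-zone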