We need Metrics Server for resource usage monitoring as well as for HPA (Horizontal Pod Autoscaler). Since Metrics Server is served through the API aggregation layer, the kube-apiserver must first be configured with front-proxy (requestheader) certificates.
- Generate the front-proxy CA and client certificates
mkdir front-proxy
cd front-proxy
vi ca-config.json
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "8760h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
vi ca-csr.json
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "IN",
      "L": "KL",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Kerala"
    }
  ]
}
cfssl gencert -initca ca-csr.json |cfssljson -bare front-proxy-ca
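- (Optional) A quick sanity check of the generated CA certificate, assuming openssl is available on the machine used to generate the certificates
openssl x509 -in front-proxy-ca.pem -noout -subject -dates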
vi front-proxy-csr.json
{
  "CN": "front-proxy-ca",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "IN",
      "L": "KL",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Kerala"
    }
  ]
}
cfssl gencert \
  -ca=front-proxy-ca.pem \
  -ca-key=front-proxy-ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  front-proxy-csr.json | cfssljson -bare front-proxy
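- (Optional) Verify that the client certificate was signed by the front-proxy CA and carries the expected CN, assuming openssl is available
openssl verify -CAfile front-proxy-ca.pem front-proxy.pem
openssl x509 -in front-proxy.pem -noout -subject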
for instance in controller-01 controller-02 controller-03
do
  scp front-proxy-ca.pem front-proxy.pem front-proxy-key.pem k8s@${instance}:~/
  ssh -t k8s@${instance} sudo "mv front-proxy-ca.pem front-proxy.pem front-proxy-key.pem /var/lib/kubernetes/"
done
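- (Optional) Confirm the certificates landed on every controller
for instance in controller-01 controller-02 controller-03
do
  ssh -t k8s@${instance} sudo "ls -l /var/lib/kubernetes/" |grep front-proxy
done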
- On each controller node, add the below flags to the ExecStart section of the kube-apiserver unit, then reload systemd and restart the service
vi /etc/systemd/system/kube-apiserver.service
--requestheader-client-ca-file=/var/lib/kubernetes/front-proxy-ca.pem \
--enable-aggregator-routing=true \
--requestheader-allowed-names=front-proxy-ca \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--proxy-client-cert-file=/var/lib/kubernetes/front-proxy.pem \
--proxy-client-key-file=/var/lib/kubernetes/front-proxy-key.pem \
systemctl daemon-reload
systemctl restart kube-apiserver
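- (Optional) On each controller, confirm kube-apiserver came back up with the new flags (the unit and flag names below are the ones configured above)
sudo systemctl status kube-apiserver --no-pager |head -n 5
ps -ef |grep [k]ube-apiserver |tr " " "\n" |grep requestheader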
- List of manifest files
YAMLS="auth-delegator.yaml
auth-reader.yaml
metrics-apiservice.yaml
metrics-server-deployment.yaml
metrics-server-service.yaml
resource-reader.yaml"
- Create a directory to put all manifests
mkdir metric-server
cd metric-server
- Download all files
for FILE in ${YAMLS}
do
  wget https://raw.githubusercontent.com/kubernetes-incubator/metrics-server/master/deploy/1.8%2B/${FILE}
done
cd ..
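- (Optional) Confirm all six manifests were downloaded
ls -l metric-server/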
- Add the nodes' hostname/IP mappings to the Deployment, so that node hostnames can be resolved by the metrics-server Pod
vi metric-server/metrics-server-deployment.yaml
Add the below under Deployment.spec.template.spec
hostAliases:
- ip: 192.168.78.211
  hostnames:
  - worker-01
- ip: 192.168.78.212
  hostnames:
  - worker-02
- ip: 192.168.78.213
  hostnames:
  - worker-03
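- (Optional) Confirm the hostAliases block ended up under Deployment.spec.template.spec with the right indentation
grep -A 12 "hostAliases:" metric-server/metrics-server-deployment.yaml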
- Add routes on the controller nodes, so that the API server can reach the metrics-server Pod IPs
From lb-01, execute the below
ROUTE_DATA="worker-01,192.168.78.211,10.10.1.0/24
worker-02,192.168.78.212,10.10.2.0/24
worker-03,192.168.78.213,10.10.3.0/24"
for instance in controller-01 controller-02 controller-03
do
  ROUTE_MINI=$(echo "${ROUTE_DATA}" | grep -v -w ${instance})
  for ROUTE in ${ROUTE_MINI}
  do
    echo "#### Adding Route for ${instance} ####"
    NET=$(echo ${ROUTE} | awk -F "," '{print $3}')
    GW=$(echo ${ROUTE} | awk -F "," '{print $2}')
    ssh -t k8s@${instance} sudo -i "echo -e \"\tpost-up route add -net ${NET} gw ${GW}\"|sudo tee --append /etc/network/interfaces"
    ssh -t k8s@${instance} sudo -i "route add -net ${NET} gw ${GW}"
  done
done
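- (Optional) Confirm the routes are present on each controller
for instance in controller-01 controller-02 controller-03
do
  ssh -t k8s@${instance} sudo "route -n" |grep "10.10."
done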
- Deploy Metrics Server
kubectl create -f metric-server/
- Wait for about 5 minutes, then execute the below to verify
kubectl describe apiservices v1beta1.metrics.k8s.io
- Output
Name:         v1beta1.metrics.k8s.io
Namespace:
Labels:       <none>
Annotations:  <none>
API Version:  apiregistration.k8s.io/v1
Kind:         APIService
Metadata:
  Creation Timestamp:  2018-10-02T13:49:50Z
  Resource Version:    98243
  Self Link:           /apis/apiregistration.k8s.io/v1/apiservices/v1beta1.metrics.k8s.io
  UID:                 081f7634-c64a-11e8-b143-0800276d1e86
Spec:
  Group:                     metrics.k8s.io
  Group Priority Minimum:    100
  Insecure Skip TLS Verify:  true
  Service:
    Name:       metrics-server
    Namespace:  kube-system
  Version:           v1beta1
  Version Priority:  100
Status:
  Conditions:
    Last Transition Time:  2018-10-02T13:49:55Z
    Message:               all checks passed
    Reason:                Passed
    Status:                True
    Type:                  Available
Events:  <none>
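- (Optional) The aggregated metrics API can also be queried directly through the apiserver
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes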
- Node resource usage
kubectl top nodes
- Output
NAME         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
worker-01    21m          2%     957Mi           50%
worker-02    21m          2%     974Mi           51%
worker-03    20m          2%     1031Mi          54%
- Pod resource usage
kubectl top pods
- Output
NAME                      CPU(cores)   MEMORY(bytes)
busybox-bd8fb7cbd-bqv8z   0m           0Mi
nginx-cdb6b5b95-b8swv     0m           1Mi
nginx-cdb6b5b95-tq946     0m           1Mi
nginx-cdb6b5b95-w4t84     0m           1Mi
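- (Optional) With metrics flowing, an HPA can now be created; a minimal example against the existing nginx Deployment, assuming its containers define CPU requests
kubectl autoscale deployment nginx --cpu-percent=50 --min=3 --max=6
kubectl get hpa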
Part 16- Deploy Dashboard