Add Monitoring Tool deployment for Kubernetes #245

Open
wants to merge 8 commits into base: master
11 changes: 11 additions & 0 deletions kubernetes/monitoring-tool/elasticsearch-pvc.yaml
@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-data
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi

98 changes: 98 additions & 0 deletions kubernetes/monitoring-tool/elasticsearch.yaml
@@ -0,0 +1,98 @@
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
spec:
  selector:
    app: elasticsearch
  ports:
    - name: http
      protocol: TCP
      port: 9200
      targetPort: 9200
    - name: transport
      protocol: TCP
      port: 9300
      targetPort: 9300
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: elasticsearch
    component: elasticsearch
    release: elasticsearch
  name: elasticsearch
spec:
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
      component: elasticsearch
      release: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
        component: elasticsearch
        release: elasticsearch
    spec:
      containers:
        - env:
            - name: cluster.name
              value: elasticsearch
            - name: discovery.type
              value: single-node
            - name: ES_JAVA_OPTS
              value: "-Xms512m -Xmx512m"
            - name: bootstrap.memory_lock
              value: "false"
          image: docker.elastic.co/elasticsearch/elasticsearch:7.17.10
          imagePullPolicy: IfNotPresent
          name: elasticsearch
          ports:
            - containerPort: 9200
              name: http
              protocol: TCP
            - containerPort: 9300
              name: transport
              protocol: TCP
          securityContext:
            privileged: true
            runAsUser: 1000
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /usr/share/elasticsearch/data
              name: elasticsearch-data
      initContainers:
        # raise the mmap limit and fix data-directory ownership/permissions
        # for the elasticsearch user (uid 1000) before the main container starts
        - command:
            - sh
            - -c
            - |
              chown -R 1000:1000 /usr/share/elasticsearch/data
              sysctl -w vm.max_map_count=262144
              chmod 777 /usr/share/elasticsearch/data
              chmod g+rwx /usr/share/elasticsearch/data
              chgrp 1000 /usr/share/elasticsearch/data
          image: busybox:1.29.2
          imagePullPolicy: IfNotPresent
          name: set-dir-owner
          resources: {}
          securityContext:
            privileged: true
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /usr/share/elasticsearch/data
              name: elasticsearch-data
      restartPolicy: Always
      securityContext: {}
      volumes:
        - name: elasticsearch-data
          persistentVolumeClaim:
            claimName: elasticsearch-data


12 changes: 12 additions & 0 deletions kubernetes/monitoring-tool/grafana-datasource.yaml
@@ -0,0 +1,12 @@
apiVersion: 1
datasources:
  - name: Elasticsearch
    type: elasticsearch
    access: proxy
    url: http://elasticsearch:9200
    basicAuth: true
    basicAuthUser: admin
    jsonData:
      esVersion: 7.x
      timeField: "@timestamp"
    secureJsonData:
      basicAuthPassword: admin
47 changes: 47 additions & 0 deletions kubernetes/monitoring-tool/grafana.yaml
@@ -0,0 +1,47 @@
apiVersion: v1
kind: Service
metadata:
  name: grafana
spec:
  selector:
    app: grafana
  ports:
    - name: http
      protocol: TCP
      port: 3000
      targetPort: 3000
  type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: grafana-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: grafana
  template:
    metadata:
      labels:
        app: grafana
    spec:
      containers:
        - name: grafana
          image: grafana/grafana:latest
          ports:
            - containerPort: 3000
          volumeMounts:
            - name: grafana-storage
              mountPath: /var/lib/grafana
            - name: grafana-datasources
              mountPath: /etc/grafana/provisioning/datasources
              readOnly: true
      volumes:
        - name: grafana-storage
          emptyDir: {}
        - name: grafana-datasources
          configMap:
            name: grafana-datasources
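
The Grafana Deployment above mounts a ConfigMap named grafana-datasources, but the PR only adds the raw provisioning file. One way to make that file available to the pod is to wrap it in a ConfigMap, sketched below; the file name grafana-datasources-configmap.yaml and the abbreviated datasource body are assumptions, and the full contents of grafana-datasource.yaml from this PR would go under the data key. Running kubectl create configmap grafana-datasources --from-file=grafana-datasource.yaml achieves the same result.

# kubernetes/monitoring-tool/grafana-datasources-configmap.yaml (sketch, not part of this PR)
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana-datasources
data:
  grafana-datasource.yaml: |
    apiVersion: 1
    datasources:
      - name: Elasticsearch
        type: elasticsearch
        access: proxy
        url: http://elasticsearch:9200
        jsonData:
          timeField: "@timestamp"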


59 changes: 59 additions & 0 deletions kubernetes/monitoring-tool/kafka.yaml
@@ -0,0 +1,59 @@
apiVersion: v1
kind: Service
metadata:
  name: kafka
spec:
  selector:
    app: kafka
  ports:
    - protocol: TCP
      port: 9092
      targetPort: 9092
  # type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka
  template:
    metadata:
      labels:
        app: kafka
    spec:
      containers:
        - name: kafka
          image: bitnami/kafka:2.8.1
          ports:
            - containerPort: 9092
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: KAFKA_LISTENERS
              # Uncomment the line below instead if Kafka should be reachable externally
              #value: "INTERNAL_PLAINTEXT://0.0.0.0:9093,EXTERNAL_PLAINTEXT://0.0.0.0:9092"
              value: "INTERNAL_PLAINTEXT://0.0.0.0:9092"
            - name: KAFKA_ADVERTISED_LISTENERS
              value: "INTERNAL_PLAINTEXT://$(POD_IP):9092"
              # Uncomment the line below instead if Kafka should be reachable externally
              #value: "INTERNAL_PLAINTEXT://$(POD_IP):9093,EXTERNAL_PLAINTEXT://162.19.225.35:9092"
            - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
              value: "INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT"
            - name: KAFKA_INTER_BROKER_LISTENER_NAME
              value: "INTERNAL_PLAINTEXT"
            - name: KAFKA_AUTO_CREATE_TOPICS_ENABLE
              value: "true"
            - name: ALLOW_PLAINTEXT_LISTENER
              value: "yes"
            - name: KAFKA_ZOOKEEPER_CONNECT
              value: "zookeeper:2181"
            - name: KAFKA_ZOOKEEPER_PROTOCOL
              value: "PLAINTEXT"


31 changes: 31 additions & 0 deletions kubernetes/monitoring-tool/logstash-configmap.yaml
@@ -0,0 +1,31 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-configmap
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
  logstash.conf: |
    # all input comes from Kafka; no local log files are read
    input {
      kafka {
        bootstrap_servers => "kafka:9092"
        client_id => "logstash"
        group_id => "logstash"
        consumer_threads => 3
        topics => ["ams-instance-stats","ams-webrtc-stats","kafka-webrtc-tester-stats"]
        codec => "json"
        tags => ["log", "kafka_source"]
        type => "log"
      }
    }
    filter {
    }
    output {
      elasticsearch {
        hosts => ["elasticsearch:9200"]
        index => "logstash-%{[type]}-%{+YYYY.MM.dd}"
      }
      stdout { codec => rubydebug }
    }
59 changes: 59 additions & 0 deletions kubernetes/monitoring-tool/logstash.yaml
@@ -0,0 +1,59 @@
apiVersion: v1
kind: Service
metadata:
  name: logstash
spec:
  selector:
    app: logstash
  ports:
    - name: tcp
      protocol: TCP
      port: 5000
      targetPort: 5000
    - name: udp
      protocol: UDP
      port: 5000
      targetPort: 5000
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash
spec:
  selector:
    matchLabels:
      app: logstash
  replicas: 1
  template:
    metadata:
      labels:
        app: logstash
    spec:
      containers:
        - image: docker.elastic.co/logstash/logstash:7.8.0
          name: logstash
          ports:
            - containerPort: 5000
          env:
            - name: ES_HOSTS
              value: "http://elasticsearch:9200"
          resources: {}
          volumeMounts:
            - name: config-volume
              mountPath: /usr/share/logstash/config
            - name: logstash-pipeline-volume
              mountPath: /usr/share/logstash/pipeline
      volumes:
        - name: config-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.yml
                path: logstash.yml
        - name: logstash-pipeline-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.conf
                path: logstash.conf

35 changes: 35 additions & 0 deletions kubernetes/monitoring-tool/zookeeper.yaml
@@ -0,0 +1,35 @@
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
spec:
  selector:
    app: zookeeper
  ports:
    - protocol: TCP
      port: 2181
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zookeeper
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      containers:
        - name: zookeeper
          image: confluentinc/cp-zookeeper:latest
          ports:
            - containerPort: 2181
          env:
            - name: ZOOKEEPER_CLIENT_PORT
              value: "2181"
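
The manifests added in this PR can be applied one by one, but a Kustomization file would let the whole stack come up in one step. A minimal sketch follows, assuming the files stay under kubernetes/monitoring-tool/; here the grafana-datasources ConfigMap is generated directly from the provisioning file, as an alternative to the ConfigMap sketched earlier.

# kubernetes/monitoring-tool/kustomization.yaml (sketch, not part of this PR)
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - zookeeper.yaml
  - kafka.yaml
  - elasticsearch-pvc.yaml
  - elasticsearch.yaml
  - logstash-configmap.yaml
  - logstash.yaml
  - grafana.yaml
generatorOptions:
  disableNameSuffixHash: true
configMapGenerator:
  - name: grafana-datasources
    files:
      - grafana-datasource.yaml

With that file in place, kubectl apply -k kubernetes/monitoring-tool/ brings up Zookeeper, Kafka, Elasticsearch, Logstash, and Grafana together.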