Add kafka and falco (#68)
* Add kafka and falco

* Use plain port

---------

Co-authored-by: Frédéric Collonval <[email protected]>
fcollonval authored Dec 11, 2024
1 parent 7ff1770 commit 3e2fa5b
Showing 12 changed files with 780 additions and 1 deletion.
78 changes: 78 additions & 0 deletions charts/datalayer-falco/values-any.yaml
@@ -0,0 +1,78 @@
collectors:
  kubernetes:
    enabled: true
customRules:
  rules-override-default.yaml: |-
    # Whitelist strimzi operator for Kafka to connect to k8s API
    - macro: user_known_contact_k8s_api_server_activities
      condition: (container.image.repository=quay.io/strimzi/operator)
      override:
        condition: replace
    # Enable cryptomining related rules
    # See https://falco.org/blog/falco-detect-cryptomining
    # Tested with https://github.com/n1g3ld0ugla5/Falco-Cryptomining-CNCF
    - rule: Detect outbound connections to common miner pool ports
      enabled: true
      override:
        enabled: replace
    - rule: Set Setuid or Setgid bit
      enabled: true
      override:
        enabled: replace
falco:
  rules_files:
    # Order matters: to apply an override, the rule being overridden
    # must be loaded first
    - /etc/falco/falco_rules.yaml
    - /etc/falco/falco-incubating_rules.yaml
    - /etc/falco/falco-sandbox_rules.yaml
    - /etc/falco/rules.d
falcoctl:
  config:
    artifact:
      install:
        refs:
          - falco-rules:3
          - falco-incubating-rules:4
          - falco-sandbox-rules:4
          - ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.2.1
      follow:
        refs:
          - falco-rules:3
          - falco-incubating-rules:4
          - falco-sandbox-rules:4
falcosidekick:
  enabled: true
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: role.datalayer.io/system
                operator: In
                values:
                  - "true"
  # See https://github.com/falcosecurity/falcosidekick/blob/master/config_example.yaml
  config:
    # Not working ??
    # templatedfields: "Dkey:{{ or (index .OutputFields \"k8s.ns.name\") \"bar\" }},k8s_labels:{{ printf \"%q\" (or (index .OutputFields \"k8s.pod.labels\") \"error\") }}"
    kafka:
      hostport: datalayer-kafka-kafka-bootstrap.datalayer-kafka.svc.cluster.local:9092
      topic: falco-events
  serviceMonitor:
    enabled: true
    additionalLabels:
      monitoring.datalayer.io/enabled: "true"
      monitoring.datalayer.io/instance: "observer"
  prometheusRules:
    enabled: true
# metrics:
#   enable: true
#   serviceMonitor:
#     create: true
#     labels:
#       monitoring.datalayer.io/enabled: "true"
#       monitoring.datalayer.io/instance: "observer"
grafana:
  dashboards:
    enabled: true
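
With these values, falcosidekick forwards every Falco alert to the falco-events Kafka topic as a JSON document. A rough sketch of such a message (field names follow the falcosidekick output format; all values below are invented for illustration):

{
  "output": "Critical Outbound connection to a common miner pool port (command=xmrig connection=10.0.0.12:3333)",
  "priority": "Critical",
  "rule": "Detect outbound connections to common miner pool ports",
  "time": "2024-12-11T10:00:00.000000000Z",
  "output_fields": {
    "k8s.ns.name": "example-namespace",
    "k8s.pod.name": "example-pod",
    "proc.cmdline": "xmrig"
  },
  "source": "syscall",
  "hostname": "example-node"
}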
17 changes: 17 additions & 0 deletions charts/datalayer-kafka/Chart.yaml
@@ -0,0 +1,17 @@
apiVersion: v1
description: Datalayer Kafka
name: datalayer-kafka
version: 0.1.0
appVersion: 0.1.0
home: https://datalayer.tech
sources:
  - https://github.com/datalayer/helm-charts/tree/main/charts/datalayer-kafka
icon: https://assets.datalayer.tech/datalayer-square.png
maintainers:
  - name: Datalayer
    email: [email protected]
    url: https://datalayer.io
dependencies:
  - name: strimzi-kafka-operator
    version: "0.44.0"
    repository: "https://strimzi.io/charts/"
13 changes: 13 additions & 0 deletions charts/datalayer-kafka/templates/falco-topic.yaml
@@ -0,0 +1,13 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
  name: falco-events
  namespace: {{ .Release.Namespace }}
  labels:
    strimzi.io/cluster: {{ .Release.Name }}
spec:
  partitions: 1
  replicas: 1
  config:
    retention.ms: 7200000       # 2 hours
    segment.bytes: 1073741824   # 1 GiB
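
To verify that alerts actually reach this topic, a throwaway consumer pod can tail it over the plain 9092 listener referenced in the falcosidekick config above. A minimal sketch, assuming the Kafka CLI image shipped with Strimzi 0.44.0 and a hypothetical pod name:

apiVersion: v1
kind: Pod
metadata:
  name: falco-events-consumer   # arbitrary name, not part of the chart
  namespace: datalayer-kafka    # namespace taken from the bootstrap address above
spec:
  restartPolicy: Never
  containers:
    - name: consumer
      # Strimzi Kafka image, which ships the standard Kafka CLI tools
      image: quay.io/strimzi/kafka:0.44.0-kafka-3.8.0
      command:
        - /opt/kafka/bin/kafka-console-consumer.sh
        - --bootstrap-server
        - datalayer-kafka-kafka-bootstrap.datalayer-kafka.svc.cluster.local:9092
        - --topic
        - falco-events
        - --from-beginning

With retention.ms set to two hours, only recent events will be replayed.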
200 changes: 200 additions & 0 deletions charts/datalayer-kafka/templates/kafka-metrics.yaml
@@ -0,0 +1,200 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: kafka-metrics
  labels:
    app: strimzi
data:
  kafka-metrics-config.yml: |
    # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
    lowercaseOutputName: true
    rules:
    # Special cases and very specific rules
    - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value
      name: kafka_server_$1_$2
      type: GAUGE
      labels:
        clientId: "$3"
        topic: "$4"
        partition: "$5"
    - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value
      name: kafka_server_$1_$2
      type: GAUGE
      labels:
        clientId: "$3"
        broker: "$4:$5"
    - pattern: kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections
      name: kafka_server_$1_connections_tls_info
      type: GAUGE
      labels:
        cipher: "$2"
        protocol: "$3"
        listener: "$4"
        networkProcessor: "$5"
    - pattern: kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections
      name: kafka_server_$1_connections_software
      type: GAUGE
      labels:
        clientSoftwareName: "$2"
        clientSoftwareVersion: "$3"
        listener: "$4"
        networkProcessor: "$5"
    - pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total):"
      name: kafka_server_$1_$4
      type: COUNTER
      labels:
        listener: "$2"
        networkProcessor: "$3"
    - pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):"
      name: kafka_server_$1_$4
      type: GAUGE
      labels:
        listener: "$2"
        networkProcessor: "$3"
    - pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total)
      name: kafka_server_$1_$4
      type: COUNTER
      labels:
        listener: "$2"
        networkProcessor: "$3"
    - pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+)
      name: kafka_server_$1_$4
      type: GAUGE
      labels:
        listener: "$2"
        networkProcessor: "$3"
    # Some percent metrics use MeanRate attribute
    # Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate
    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate
      name: kafka_$1_$2_$3_percent
      type: GAUGE
    # Generic gauges for percents
    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value
      name: kafka_$1_$2_$3_percent
      type: GAUGE
    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value
      name: kafka_$1_$2_$3_percent
      type: GAUGE
      labels:
        "$4": "$5"
    # Generic per-second counters with 0-2 key/value pairs
    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_total
      type: COUNTER
      labels:
        "$4": "$5"
        "$6": "$7"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_total
      type: COUNTER
      labels:
        "$4": "$5"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count
      name: kafka_$1_$2_$3_total
      type: COUNTER
    # Generic gauges with 0-2 key/value pairs
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
        "$6": "$7"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value
      name: kafka_$1_$2_$3
      type: GAUGE
    # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
    # Note that these are missing the '_sum' metric!
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_count
      type: COUNTER
      labels:
        "$4": "$5"
        "$6": "$7"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
        "$6": "$7"
        quantile: "0.$8"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_count
      type: COUNTER
      labels:
        "$4": "$5"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
        quantile: "0.$6"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count
      name: kafka_$1_$2_$3_count
      type: COUNTER
    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        quantile: "0.$4"
    # KRaft overall related metrics
    # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
    - pattern: "kafka.server<type=raft-metrics><>(.+-total|.+-max):"
      name: kafka_server_raftmetrics_$1
      type: COUNTER
    - pattern: "kafka.server<type=raft-metrics><>(current-state): (.+)"
      name: kafka_server_raftmetrics_$1
      value: 1
      type: UNTYPED
      labels:
        $1: "$2"
    - pattern: "kafka.server<type=raft-metrics><>(.+):"
      name: kafka_server_raftmetrics_$1
      type: GAUGE
    # KRaft "low level" channels related metrics
    # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
    - pattern: "kafka.server<type=raft-channel-metrics><>(.+-total|.+-max):"
      name: kafka_server_raftchannelmetrics_$1
      type: COUNTER
    - pattern: "kafka.server<type=raft-channel-metrics><>(.+):"
      name: kafka_server_raftchannelmetrics_$1
      type: GAUGE
    # Broker metrics related to fetching metadata topic records in KRaft mode
    - pattern: "kafka.server<type=broker-metadata-metrics><>(.+):"
      name: kafka_server_brokermetadatametrics_$1
      type: GAUGE
  zookeeper-metrics-config.yml: |
    # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
    lowercaseOutputName: true
    rules:
    # replicated Zookeeper
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)"
      name: "zookeeper_$2"
      type: GAUGE
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)"
      name: "zookeeper_$3"
      type: GAUGE
      labels:
        replicaId: "$2"
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(Packets\\w+)"
      name: "zookeeper_$4"
      type: COUNTER
      labels:
        replicaId: "$2"
        memberType: "$3"
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)"
      name: "zookeeper_$4"
      type: GAUGE
      labels:
        replicaId: "$2"
        memberType: "$3"
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)"
      name: "zookeeper_$4_$5"
      type: GAUGE
      labels:
        replicaId: "$2"
        memberType: "$3"
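
The Kafka cluster resource that consumes this ConfigMap is presumably defined in one of the changed files not rendered here. For context, the standard Strimzi wiring looks roughly like the sketch below; the cluster name is inferred from the datalayer-kafka-kafka-bootstrap address and the strimzi.io/cluster label of the KafkaTopic, while the Kafka version, replica counts, and storage are assumptions:

apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
  name: datalayer-kafka   # assumed; must match the strimzi.io/cluster label of the KafkaTopic
spec:
  kafka:
    version: 3.8.0        # assumed Kafka version for Strimzi 0.44.0
    replicas: 1
    listeners:
      # Plain (non-TLS) internal listener on 9092, matching the falcosidekick hostport
      - name: plain
        port: 9092
        type: internal
        tls: false
    storage:
      type: ephemeral
    metricsConfig:
      type: jmxPrometheusExporter
      valueFrom:
        configMapKeyRef:
          name: kafka-metrics
          key: kafka-metrics-config.yml
  zookeeper:
    replicas: 1
    storage:
      type: ephemeral
    metricsConfig:
      type: jmxPrometheusExporter
      valueFrom:
        configMapKeyRef:
          name: kafka-metrics
          key: zookeeper-metrics-config.yml
  entityOperator:
    # Needed so the KafkaTopic resource above is reconciled
    topicOperator: {}

The plain listener on 9092 lines up with the "Use plain port" note in the commit message and with the hostport configured for falcosidekick.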