diff --git a/.github/workflows/agreements.yaml b/.github/workflows/agreements.yaml
index f2ef07588..e147565e9 100644
--- a/.github/workflows/agreements.yaml
+++ b/.github/workflows/agreements.yaml
@@ -7,7 +7,7 @@ on:
jobs:
call-workflow-agreements:
- uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-agreements.yaml@v1.5.1
+ uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-agreements.yaml@v1.7.0
permissions:
actions: read
contents: read
diff --git a/.github/workflows/cd-dashboard-release.yaml b/.github/workflows/cd-dashboard-release.yaml
new file mode 100644
index 000000000..3cbfd02b9
--- /dev/null
+++ b/.github/workflows/cd-dashboard-release.yaml
@@ -0,0 +1,22 @@
+name: dashboard-release
+on:
+ push:
+ tags:
+ - "v*"
+
+jobs:
+ dashboard-release:
+ name: Dashboard release
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ packages: write
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v4
+ - name: Upload dashboard to release assets
+ run: |
+ VERSION=$(echo $GITHUB_REF | cut -d / -f 3)
+ gh release upload $VERSION dashboard/dashboard.xml
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml
index 143718d88..3314046c0 100644
--- a/.github/workflows/ci-main.yaml
+++ b/.github/workflows/ci-main.yaml
@@ -71,6 +71,8 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
- uses: pre-commit/action@v3.0.1
test-unit:
name: Test Unit Python ${{ matrix.python-version }}
diff --git a/.github/workflows/mike.yaml b/.github/workflows/mike.yaml
index b67b6aa46..29798dfa8 100644
--- a/.github/workflows/mike.yaml
+++ b/.github/workflows/mike.yaml
@@ -45,7 +45,7 @@ jobs:
sudo apt install gnome-keyring
BRANCH=$(echo $GITHUB_REF | cut -d / -f 3)
echo $BRANCH
- pip3 install poetry=="1.5.1"
+ curl -sSL https://install.python-poetry.org | python3 -
poetry install
poetry run pip install 'setuptools==65.6.3'
poetry run mike deploy -p $BRANCH
diff --git a/.github/workflows/release-notes.yaml b/.github/workflows/release-notes.yaml
index 9b8106622..215a78250 100644
--- a/.github/workflows/release-notes.yaml
+++ b/.github/workflows/release-notes.yaml
@@ -10,6 +10,6 @@ jobs:
permissions:
contents: write
packages: write
- uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-release-notes.yaml@v1.5.1
+ uses: splunk/addonfactory-github-workflows/.github/workflows/reusable-release-notes.yaml@v1.7.0
secrets:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8855eed80..7ca23a8e4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -15,27 +15,28 @@
#
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.1.0
+ rev: v4.6.0
hooks:
- id: check-merge-conflict
- id: debug-statements
- repo: https://github.com/asottile/pyupgrade
- rev: v2.31.0
+ rev: v3.17.0
hooks:
- id: pyupgrade
args:
- --py3-plus
- repo: https://github.com/psf/black
- rev: 22.3.0
+ rev: 24.8.0
hooks:
- id: black
- repo: https://github.com/PyCQA/isort
- rev: 5.12.0
+ rev: 5.13.2
hooks:
- id: isort
args: ["--profile", "black"]
- repo: https://github.com/pre-commit/mirrors-mypy
- rev: v0.931
+ rev: v1.11.2
hooks:
- id: mypy
- additional_dependencies: [types-all]
+ exclude: (^ui_tests|^test*|^docs)
+ additional_dependencies: [types-requests, types-PyYAML]
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c278b97b4..28a82f404 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,15 @@
### Fixed
+## [1.12.0]
+
+### Changed
+- add metrics dashboard
+- add support for IPv6 polling and traps for kubernetes deployment
+- add support for IPv6 polling and traps for docker deployment
+- add nodeSelector and podAntiAffinity to sim chart
+- add support for new IP format in dashboard
+
## [1.11.0]
### Changed
diff --git a/charts/splunk-connect-for-snmp/Chart.lock b/charts/splunk-connect-for-snmp/Chart.lock
index 2e8b67e6d..48322020a 100644
--- a/charts/splunk-connect-for-snmp/Chart.lock
+++ b/charts/splunk-connect-for-snmp/Chart.lock
@@ -4,9 +4,9 @@ dependencies:
version: 13.18.5
- name: redis
repository: https://charts.bitnami.com/bitnami
- version: 19.6.2
+ version: 20.2.0
- name: mibserver
repository: https://pysnmp.github.io/mibs/charts/
- version: 1.15.11
-digest: sha256:bf06c372ef537ce231d60cc88f571997c6be554e0a2ae0ead95d262183d6aabe
-generated: "2024-07-24T13:03:51.800592+02:00"
+ version: 1.15.13
+digest: sha256:dedfae30b39e9b78a83f8e1822666095f79ba2b9b112b04f7a11a2a318674713
+generated: "2024-10-14T10:28:52.408747265Z"
diff --git a/charts/splunk-connect-for-snmp/Chart.yaml b/charts/splunk-connect-for-snmp/Chart.yaml
index f68cd4465..8e1d78309 100644
--- a/charts/splunk-connect-for-snmp/Chart.yaml
+++ b/charts/splunk-connect-for-snmp/Chart.yaml
@@ -14,19 +14,19 @@ type: application
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 1.11.0
+version: 1.12.0-beta.6
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "1.11.0"
+appVersion: "1.12.0-beta.6"
#
dependencies:
- name: mongodb
version: ~13.18.0
repository: https://charts.bitnami.com/bitnami
- name: redis
- version: ~19.6.0
+ version: ~20.2.0
repository: https://charts.bitnami.com/bitnami
- name: mibserver
version: ~1.15
diff --git a/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml b/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml
index 2123edf85..dfecc4d1c 100644
--- a/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml
+++ b/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml
@@ -60,6 +60,29 @@ spec:
- name: config
mountPath: "/config"
readOnly: true
+ {{- with .Values.sim.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- if eq .Values.sim.podAntiAffinity "hard" }}
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: "kubernetes.io/hostname"
+ labelSelector:
+ matchLabels:
+ {{- include "splunk-connect-for-snmp.sim.selectorLabels" . | nindent 22 }}
+ {{- else if eq .Values.sim.podAntiAffinity "soft" }}
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 1
+ podAffinityTerm:
+ topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ {{- include "splunk-connect-for-snmp.sim.selectorLabels" . | nindent 22 }}
+ {{- end }}
volumes:
- name: config
configMap:
diff --git a/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml b/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml
index 8948d5a68..8bd34ded8 100644
--- a/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml
+++ b/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml
@@ -59,6 +59,8 @@ spec:
value: "http://{{ printf "%s-%s" .Release.Name "mibserver" }}/standard.txt"
- name: LOG_LEVEL
value: {{ .Values.traps.logLevel | default "INFO" }}
+ - name: PYSNMP_DEBUG
+ value: {{ .Values.pysnmpDebug | default "" | quote }}
{{- if .Values.splunk.protocol }}
- name: SPLUNK_HEC_SCHEME
value: {{ .Values.splunk.protocol | default "https" | quote }}
@@ -82,10 +84,21 @@ spec:
secretKeyRef:
name: {{ include "splunk-connect-for-snmp.name" . }}-splunk
key: hec_token
+ - name: IPv6_ENABLED
+ {{- if has "IPv6" .Values.traps.ipFamilies}}
+ value: "true"
+ {{ else }}
+ value: "false"
+ {{- end }}
ports:
- name: snmp-udp
containerPort: 2162
protocol: UDP
+ {{- if has "IPv6" .Values.traps.ipFamilies}}
+ - name: snmp-udp6
+ containerPort: 2163
+ protocol: UDP
+ {{- end }}
volumeMounts:
- name: config
mountPath: "/app/config"
diff --git a/charts/splunk-connect-for-snmp/templates/traps/service.yaml b/charts/splunk-connect-for-snmp/templates/traps/service.yaml
index b51fd48d5..967e0c646 100644
--- a/charts/splunk-connect-for-snmp/templates/traps/service.yaml
+++ b/charts/splunk-connect-for-snmp/templates/traps/service.yaml
@@ -8,6 +8,7 @@ metadata:
annotations:
{{- if .Values.traps.service.usemetallb }}
metallb.universe.tf/allow-shared-ip: {{ .Values.traps.service.metallbsharingkey | default "splunk-connect" | quote }}
+ metallb.universe.tf/loadBalancerIPs: {{ .Values.traps.loadBalancerIP }}
{{- end }}
{{- if .Values.traps.service.annotations }}
{{ toYaml .Values.traps.service.annotations | indent 4 }}
@@ -20,7 +21,8 @@ spec:
type: {{ .Values.traps.service.type }}
externalTrafficPolicy: {{ .Values.traps.service.externalTrafficPolicy | default "Local" }}
{{- if .Values.traps.loadBalancerIP }}
- loadBalancerIP: {{ .Values.traps.loadBalancerIP }}
+ ipFamilyPolicy: {{ .Values.traps.ipFamilyPolicy }}
+ ipFamilies: {{ .Values.traps.ipFamilies | toYaml | nindent 2 }}
{{- end }}
ports:
- port: {{ .Values.traps.service.port }}
@@ -30,6 +32,15 @@ spec:
targetPort: 2162
protocol: UDP
name: snmp-udp
+ {{- if has "IPv6" .Values.traps.ipFamilies}}
+ - port: {{ .Values.traps.service.ipv6Port | default 2163}}
+ {{- if and .Values.traps.service.nodePort (eq .Values.traps.service.type "NodePort")}}
+ nodePort: {{ .Values.traps.service.ipv6NodePort | default 30003 }}
+ {{- end }}
+ targetPort: 2163
+ protocol: UDP
+ name: snmp-udp6
+ {{- end }}
selector:
{{- include "splunk-connect-for-snmp.traps.selectorLabels" . | nindent 4 }}
{{- end -}}
\ No newline at end of file
diff --git a/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl
index ddb6ef803..7e3e6647e 100644
--- a/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl
+++ b/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl
@@ -111,6 +111,8 @@ Common labels
value: {{ .Values.worker.udpConnectionTimeout | default "3" | quote }}
- name: MAX_OID_TO_PROCESS
value: {{ .Values.poller.maxOidToProcess | default "70" | quote }}
+- name: PYSNMP_DEBUG
+ value: {{ .Values.pysnmpDebug | default "" | quote }}
- name: PROFILES_RELOAD_DELAY
value: {{ .Values.worker.profilesReloadDelay | default "60" | quote }}
- name: MIB_SOURCES
diff --git a/charts/splunk-connect-for-snmp/values.schema.json b/charts/splunk-connect-for-snmp/values.schema.json
index ba39ae077..c8036dca0 100644
--- a/charts/splunk-connect-for-snmp/values.schema.json
+++ b/charts/splunk-connect-for-snmp/values.schema.json
@@ -37,6 +37,9 @@
"imagePullSecrets": {
"type": "array"
},
+ "pysnmpDebug": {
+ "type": "string"
+ },
"UI": {
"type": "object",
"additionalProperties": false,
@@ -244,6 +247,15 @@
"type": "boolean"
}
}
+ },
+ "podAnnotations": {
+ "type": "object"
+ },
+ "podAntiAffinity": {
+ "type": "string"
+ },
+ "nodeSelector": {
+ "type": "object"
}
}
},
@@ -716,12 +728,24 @@
},
"nodePort": {
"type": "integer"
+ },
+ "ipv6Port": {
+ "type": "integer"
+ },
+ "ipv6NodePort": {
+ "type": "integer"
}
}
},
"loadBalancerIP": {
"type": "string"
},
+ "ipFamilyPolicy": {
+ "type": "string"
+ },
+ "ipFamilies": {
+ "type": "array"
+ },
"resources": {
"type": "object",
"additionalProperties": false,
diff --git a/charts/splunk-connect-for-snmp/values.yaml b/charts/splunk-connect-for-snmp/values.yaml
index f600042c5..d57196226 100644
--- a/charts/splunk-connect-for-snmp/values.yaml
+++ b/charts/splunk-connect-for-snmp/values.yaml
@@ -126,6 +126,10 @@ sim:
autoscaling:
enabled: false
+ podAnnotations: {}
+ podAntiAffinity: soft
+ nodeSelector: {}
+
################################################################################
# SC4SNMP components settings
################################################################################
@@ -425,13 +429,19 @@ traps:
# on a multi-node it's better to set this as NodePort and configure traps.service.nodePort
type: LoadBalancer
port: 162
+ # ipv6Port: 2163
+
# nodePort will be set only when type of service is a NodePort
#nodePort: 30000
+ #ipv6NodePort: 30003
#loadBalancerIP must be set to the IP address in the metallb pool.
#It is required when service type is set to LoadBalancer.
#loadBalancerIP: 18.117.100.37
loadBalancerIP: ""
+ ipFamilyPolicy: SingleStack
+ ipFamilies:
+ - IPv4
resources: {}
# limits:
diff --git a/dashboard/dashboard.xml b/dashboard/dashboard.xml
new file mode 100644
index 000000000..61dae7c79
--- /dev/null
+++ b/dashboard/dashboard.xml
@@ -0,0 +1,254 @@
+
\ No newline at end of file
diff --git a/docker_compose/.env b/docker_compose/.env
index b597bbf40..4f5f509b9 100644
--- a/docker_compose/.env
+++ b/docker_compose/.env
@@ -1,13 +1,13 @@
# Deployment configuration
SC4SNMP_IMAGE=ghcr.io/splunk/splunk-connect-for-snmp/container
-SC4SNMP_TAG="1.11.0"
+SC4SNMP_TAG="1.12.0-beta.6"
SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH=
TRAPS_CONFIG_FILE_ABSOLUTE_PATH=
INVENTORY_FILE_ABSOLUTE_PATH=
COREFILE_ABS_PATH=
COREDNS_ADDRESS=172.28.0.255
-SC4SNMP_VERSION="1.11.0"
-
+SC4SNMP_VERSION="1.12.0-beta.6"
+IPv6_ENABLED=false
# Dependencies images
COREDNS_IMAGE=coredns/coredns
@@ -82,6 +82,7 @@ CHAIN_OF_TASKS_EXPIRY_TIME=500
# Traps configuration
SNMP_V3_SECURITY_ENGINE_ID=80003a8c04
TRAPS_PORT=162
+IPv6_TRAPS_PORT=2163
TRAP_LOG_LEVEL=INFO
# Scheduler configuration
diff --git a/docker_compose/docker-compose-coredns.yaml b/docker_compose/docker-compose-coredns.yaml
index 887991b76..dcabb1496 100644
--- a/docker_compose/docker-compose-coredns.yaml
+++ b/docker_compose/docker-compose-coredns.yaml
@@ -11,5 +11,5 @@ services:
volumes:
- '${COREFILE_ABS_PATH}:/Corefile'
networks:
- my_network:
+ sc4snmp_network:
ipv4_address: ${COREDNS_ADDRESS}
diff --git a/docker_compose/docker-compose-dependencies.yaml b/docker_compose/docker-compose-dependencies.yaml
index 539ac183a..73434af1c 100644
--- a/docker_compose/docker-compose-dependencies.yaml
+++ b/docker_compose/docker-compose-dependencies.yaml
@@ -10,7 +10,7 @@ services:
depends_on:
- coredns
networks:
- - my_network
+ - sc4snmp_network
dns:
- ${COREDNS_ADDRESS}
@@ -23,7 +23,7 @@ services:
depends_on:
- coredns
networks:
- - my_network
+ - sc4snmp_network
dns:
- ${COREDNS_ADDRESS}
mongo:
@@ -33,7 +33,7 @@ services:
depends_on:
- coredns
networks:
- - my_network
+ - sc4snmp_network
dns:
- ${COREDNS_ADDRESS}
volumes:
diff --git a/docker_compose/docker-compose-inventory.yaml b/docker_compose/docker-compose-inventory.yaml
index 1f1da8409..91cee276c 100644
--- a/docker_compose/docker-compose-inventory.yaml
+++ b/docker_compose/docker-compose-inventory.yaml
@@ -28,7 +28,7 @@ services:
- inventory-tmp:/tmp/:rw
restart: on-failure
networks:
- - my_network
+ - sc4snmp_network
dns:
- ${COREDNS_ADDRESS}
volumes:
diff --git a/docker_compose/docker-compose-network.yaml b/docker_compose/docker-compose-network.yaml
index ce09f5a6a..f7fa80a7e 100644
--- a/docker_compose/docker-compose-network.yaml
+++ b/docker_compose/docker-compose-network.yaml
@@ -1,7 +1,11 @@
version: '3.8'
networks:
- my_network:
+ sc4snmp_network:
+ name: sc4snmp_network
+ enable_ipv6: ${IPv6_ENABLED:-false}
ipam:
- driver: default
config:
- - subnet: 172.28.0.0/16
\ No newline at end of file
+ - subnet: 172.28.0.0/16
+ gateway: 172.28.0.1
+ - subnet: fd02::/64
+ gateway: fd02::1
\ No newline at end of file
diff --git a/docker_compose/docker-compose-scheduler.yaml b/docker_compose/docker-compose-scheduler.yaml
index 661dbe54a..f74c1e072 100644
--- a/docker_compose/docker-compose-scheduler.yaml
+++ b/docker_compose/docker-compose-scheduler.yaml
@@ -25,7 +25,7 @@ services:
- scheduler-tmp:/tmp/:rw
restart: on-failure
networks:
- - my_network
+ - sc4snmp_network
dns:
- ${COREDNS_ADDRESS}
volumes:
diff --git a/docker_compose/docker-compose-traps.yaml b/docker_compose/docker-compose-traps.yaml
index 5b1864f2c..1abba1b02 100644
--- a/docker_compose/docker-compose-traps.yaml
+++ b/docker_compose/docker-compose-traps.yaml
@@ -25,14 +25,20 @@ services:
- SPLUNK_HEC_INSECURESSL=${SPLUNK_HEC_INSECURESSL:-false}
- SPLUNK_HEC_PATH=${SPLUNK_HEC_PATH:-/services/collector}
- SNMP_V3_SECURITY_ENGINE_ID=${SNMP_V3_SECURITY_ENGINE_ID:-80003a8c04}
+ - PYSNMP_DEBUG=${PYSNMP_DEBUG}
+ - IPv6_ENABLED=${IPv6_ENABLED:-false}
image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest}
networks:
- - my_network
+ - sc4snmp_network
ports:
- mode: host
protocol: udp
published: ${TRAPS_PORT}
target: 2162
+ - mode: host
+ protocol: udp
+ published: ${IPv6_TRAPS_PORT}
+ target: 2163
restart: on-failure
secrets: []
volumes:
diff --git a/docker_compose/docker-compose-worker-poller.yaml b/docker_compose/docker-compose-worker-poller.yaml
index c36cc74df..8f52118cf 100644
--- a/docker_compose/docker-compose-worker-poller.yaml
+++ b/docker_compose/docker-compose-worker-poller.yaml
@@ -42,9 +42,11 @@ services:
- PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60}
- WORKER_CONCURRENCY=${WORKER_POLLER_CONCURRENCY:-2}
- PREFETCH_COUNT=${PREFETCH_POLLER_COUNT:-1}
+ - PYSNMP_DEBUG=${PYSNMP_DEBUG}
+ - IPv6_ENABLED=${IPv6_ENABLED:-false}
image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest}
networks:
- - my_network
+ - sc4snmp_network
restart: on-failure
secrets: []
volumes:
diff --git a/docker_compose/docker-compose-worker-sender.yaml b/docker_compose/docker-compose-worker-sender.yaml
index 3c0931934..f0040e6ae 100644
--- a/docker_compose/docker-compose-worker-sender.yaml
+++ b/docker_compose/docker-compose-worker-sender.yaml
@@ -41,6 +41,7 @@ services:
- PROFILES_RELOAD_DELAY=${PROFILES_RELOAD_DELAY:-60}
- WORKER_CONCURRENCY=${WORKER_SENDER_CONCURRENCY:-2}
- PREFETCH_COUNT=${PREFETCH_SENDER_COUNT:-1}
+ - PYSNMP_DEBUG=${PYSNMP_DEBUG}
depends_on:
- redis
- mongo
@@ -51,7 +52,7 @@ services:
- worker-sender-tmp:/tmp/:rw
restart: on-failure
networks:
- - my_network
+ - sc4snmp_network
dns:
- ${COREDNS_ADDRESS}
deploy:
diff --git a/docker_compose/docker-compose-worker-trap.yaml b/docker_compose/docker-compose-worker-trap.yaml
index fa4253774..89f61e54f 100644
--- a/docker_compose/docker-compose-worker-trap.yaml
+++ b/docker_compose/docker-compose-worker-trap.yaml
@@ -44,6 +44,8 @@ services:
- RESOLVE_TRAP_ADDRESS=${RESOLVE_TRAP_ADDRESS:-false}
- MAX_DNS_CACHE_SIZE_TRAPS=${MAX_DNS_CACHE_SIZE_TRAPS:-500}
- TTL_DNS_CACHE_TRAPS=${TTL_DNS_CACHE_TRAPS:-1800}
+ - PYSNMP_DEBUG=${PYSNMP_DEBUG}
+ - IPv6_ENABLED=${IPv6_ENABLED:-false}
depends_on:
- redis
- mongo
@@ -54,7 +56,7 @@ services:
- worker-trap-tmp:/tmp/:rw
restart: on-failure
networks:
- - my_network
+ - sc4snmp_network
dns:
- ${COREDNS_ADDRESS}
deploy:
diff --git a/docker_compose/manage_logs.py b/docker_compose/manage_logs.py
index c5d4ca8be..a7c28f675 100644
--- a/docker_compose/manage_logs.py
+++ b/docker_compose/manage_logs.py
@@ -6,6 +6,7 @@
import ruamel.yaml
DEPENDENCIES = ["snmp-mibserver", "redis", "mongo"]
+DOCKER_COMPOSE_DEPENDENCIES = "docker-compose-dependencies.yaml"
def human_bool(flag: Union[str, bool], default: bool = False) -> bool:
@@ -99,7 +100,7 @@ def create_logs(environment, path_to_compose_files):
try:
yaml2 = ruamel.yaml.YAML()
with open(
- os.path.join(path_to_compose_files, "docker-compose-dependencies.yaml")
+ os.path.join(path_to_compose_files, DOCKER_COMPOSE_DEPENDENCIES)
) as file:
yaml_file = yaml2.load(file)
@@ -108,7 +109,7 @@ def create_logs(environment, path_to_compose_files):
yaml_file["services"][service_name].update(template_yaml)
with open(
- os.path.join(path_to_compose_files, "docker-compose-dependencies.yaml"), "w"
+ os.path.join(path_to_compose_files, DOCKER_COMPOSE_DEPENDENCIES), "w"
) as file:
yaml2.dump(yaml_file, file)
except Exception as e:
@@ -142,7 +143,7 @@ def delete_logs(path_to_compose_files):
try:
with open(
- os.path.join(path_to_compose_files, "docker-compose-dependencies.yaml")
+ os.path.join(path_to_compose_files, DOCKER_COMPOSE_DEPENDENCIES)
) as file:
yaml2 = ruamel.yaml.YAML()
yaml_file = yaml2.load(file)
@@ -152,7 +153,7 @@ def delete_logs(path_to_compose_files):
yaml_file["services"][service_name]["logging"].pop("options")
with open(
- os.path.join(path_to_compose_files, "docker-compose-dependencies.yaml"), "w"
+ os.path.join(path_to_compose_files, DOCKER_COMPOSE_DEPENDENCIES), "w"
) as file:
yaml2.dump(yaml_file, file)
except Exception as e:
diff --git a/docker_compose/manage_secrets.py b/docker_compose/manage_secrets.py
index 321052ed4..89d1e274a 100644
--- a/docker_compose/manage_secrets.py
+++ b/docker_compose/manage_secrets.py
@@ -75,25 +75,7 @@ def create_secrets(
raise ValueError(f"Value {k} is not set")
# list for storing secrets configuration which should be added to docker-compose-secrets.yaml
- new_secrets = []
- # list for storing secrets configuration which should be added to docker-compose-worker-poller.yaml and
- # docker-compose-traps.yaml services
- new_secrets_in_workers = []
-
- for k, v in variables.items():
- if v:
- new_secrets.append(
- {
- "secret_name": f"{secret_name}_{k}",
- "secret_config": {"environment": f"{secret_name}_{k}"},
- }
- )
- new_secrets_in_workers.append(
- {
- "source": f"{secret_name}_{k}",
- "target": f"/app/secrets/snmpv3/{secret_name}/{k}",
- }
- )
+ new_secrets, new_secrets_in_workers = store_secrets(secret_name, variables)
try:
# Load docker-compose-secrets.yaml to a dictionary and update "secrets" section. If the same secret
@@ -115,70 +97,130 @@ def create_secrets(
secrets_file_ready = False
if make_change_in_worker_poller:
- # If the secret should be added to worker poller, load docker-compose-worker-poller.yaml to a dictionary and
- # update "secrets" section.
- try:
- with open(
- os.path.join(path_to_compose_files, DOCKER_COMPOSE_WORKER_POLLER)
- ) as file:
- worker_poller_file = yaml.load(file, Loader=yaml.FullLoader)
- if "secrets" not in worker_poller_file["services"]["worker-poller"]:
- worker_poller_file["services"]["worker-poller"]["secrets"] = []
- worker_poller_file["services"]["worker-poller"]["secrets"].extend(
- new_secrets_in_workers
- )
- worker_poller_file_ready = True
- except Exception:
- print(
- "Problem with editing docker-compose-worker-poller.yaml. Secret not added."
- )
- worker_poller_file_ready = False
+ worker_poller_file, worker_poller_file_ready = load_compose_worker_poller(
+ new_secrets_in_workers, path_to_compose_files
+ )
else:
+ worker_poller_file = {}
worker_poller_file_ready = True
if make_change_in_traps:
- # If the secret should be added to traps, load docker-compose-traps.yaml to a dictionary and
- # update "secrets" section.
- try:
- with open(
- os.path.join(path_to_compose_files, DOCKER_COMPOSE_TRAPS)
- ) as file:
- traps_file = yaml.load(file, Loader=yaml.FullLoader)
- if "secrets" not in traps_file["services"]["traps"]:
- traps_file["services"]["traps"]["secrets"] = []
- traps_file["services"]["traps"]["secrets"].extend(new_secrets_in_workers)
- traps_file_ready = True
- except Exception:
- print("Problem with editing docker-compose-traps.yaml. Secret not added.")
- traps_file_ready = False
+ traps_file, traps_file_ready = load_compose_traps(
+ new_secrets_in_workers, path_to_compose_files
+ )
else:
+ traps_file = {}
traps_file_ready = True
+ save_to_compose_files(
+ make_change_in_traps,
+ make_change_in_worker_poller,
+ path_to_compose_files,
+ secret_name,
+ secrets_file,
+ secrets_file_ready,
+ traps_file,
+ traps_file_ready,
+ variables,
+ worker_poller_file,
+ worker_poller_file_ready,
+ )
+
+
+def save_to_compose_files(
+ make_change_in_traps,
+ make_change_in_worker_poller,
+ path_to_compose_files,
+ secret_name,
+ secrets_file,
+ secrets_file_ready,
+ traps_file,
+ traps_file_ready,
+ variables,
+ worker_poller_file,
+ worker_poller_file_ready,
+):
if secrets_file_ready and worker_poller_file_ready and traps_file_ready:
# If all three files were loaded into dictionary and updated successfully,
# save the latest configuration to files.
- with open(
- os.path.join(path_to_compose_files, DOCKER_COMPOSE_SECRETS), "w"
- ) as file:
- yaml.dump(secrets_file, file, default_flow_style=False)
-
+ save_to_yaml_file(path_to_compose_files, DOCKER_COMPOSE_SECRETS, secrets_file)
with open(os.path.join(path_to_compose_files, ".env"), "a") as file:
for k, v in variables.items():
if v:
file.write(f"\n{secret_name}_{k}={v}")
-
if make_change_in_worker_poller:
- with open(
- os.path.join(path_to_compose_files, DOCKER_COMPOSE_WORKER_POLLER),
- "w",
- ) as file:
- yaml.dump(worker_poller_file, file, default_flow_style=False)
-
+ save_to_yaml_file(
+ path_to_compose_files, DOCKER_COMPOSE_WORKER_POLLER, worker_poller_file
+ )
if make_change_in_traps:
- with open(
- os.path.join(path_to_compose_files, DOCKER_COMPOSE_TRAPS), "w"
- ) as file:
- yaml.dump(traps_file, file, default_flow_style=False)
+ save_to_yaml_file(path_to_compose_files, DOCKER_COMPOSE_TRAPS, traps_file)
+
+
+def save_to_yaml_file(file_path, file_name, file_content):
+ with open(os.path.join(file_path, file_name), "w") as file:
+ yaml.dump(file_content, file, default_flow_style=False)
+
+
+def load_compose_traps(new_secrets_in_workers, path_to_compose_files):
+ # If the secret should be added to traps, load docker-compose-traps.yaml to a dictionary and
+ # update "secrets" section.
+ try:
+ with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE_TRAPS)) as file:
+ traps_file = yaml.load(file, Loader=yaml.FullLoader)
+ if "secrets" not in traps_file["services"]["traps"]:
+ traps_file["services"]["traps"]["secrets"] = []
+ traps_file["services"]["traps"]["secrets"].extend(new_secrets_in_workers)
+ traps_file_ready = True
+ except Exception:
+ print("Problem with editing docker-compose-traps.yaml. Secret not added.")
+ traps_file = {}
+ traps_file_ready = False
+ return traps_file, traps_file_ready
+
+
+def load_compose_worker_poller(new_secrets_in_workers, path_to_compose_files):
+ # If the secret should be added to worker poller, load docker-compose-worker-poller.yaml to a dictionary and
+ # update "secrets" section.
+ try:
+ with open(
+ os.path.join(path_to_compose_files, DOCKER_COMPOSE_WORKER_POLLER)
+ ) as file:
+ worker_poller_file = yaml.load(file, Loader=yaml.FullLoader)
+ if "secrets" not in worker_poller_file["services"]["worker-poller"]:
+ worker_poller_file["services"]["worker-poller"]["secrets"] = []
+ worker_poller_file["services"]["worker-poller"]["secrets"].extend(
+ new_secrets_in_workers
+ )
+ worker_poller_file_ready = True
+ except Exception:
+ print(
+ "Problem with editing docker-compose-worker-poller.yaml. Secret not added."
+ )
+ worker_poller_file = {}
+ worker_poller_file_ready = False
+ return worker_poller_file, worker_poller_file_ready
+
+
+def store_secrets(secret_name, variables):
+ new_secrets = []
+ # list for storing secrets configuration which should be added to docker-compose-worker-poller.yaml and
+ # docker-compose-traps.yaml services
+ new_secrets_in_workers = []
+ for k, v in variables.items():
+ if v:
+ new_secrets.append(
+ {
+ "secret_name": f"{secret_name}_{k}",
+ "secret_config": {"environment": f"{secret_name}_{k}"},
+ }
+ )
+ new_secrets_in_workers.append(
+ {
+ "source": f"{secret_name}_{k}",
+ "target": f"/app/secrets/snmpv3/{secret_name}/{k}",
+ }
+ )
+ return new_secrets, new_secrets_in_workers
def delete_secrets(
@@ -200,39 +242,13 @@ def delete_secrets(
for key in variables.keys():
secrets.append(f"{secret_name}_{key}")
- # Load docker-compose-secrets.yaml file to a dictionary and delete desired secrets
- with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE_SECRETS)) as file:
- secrets_file = yaml.load(file, Loader=yaml.FullLoader)
- for secret in secrets:
- if secret in secrets_file["secrets"]:
- del secrets_file["secrets"][secret]
+ secrets_file = load_compose_secrets(path_to_compose_files, secrets)
# Save the updated docker-compose-secrets.yaml configuration
- with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE_SECRETS), "w") as file:
- yaml.dump(secrets_file, file, default_flow_style=False)
+ save_to_yaml_file(path_to_compose_files, DOCKER_COMPOSE_SECRETS, secrets_file)
# Delete secrets from .env
- try:
- # Read lines from .env
- with open(os.path.join(path_to_compose_files, ".env")) as env_file:
- lines = env_file.readlines()
-
- with open(os.path.join(path_to_compose_files, ".env"), "w") as env_file:
- lines_to_write = []
- # If the environmental variable is NOT one of the secrets destined for deletion, add them to lines_to_write
- for line in lines:
- key = line.split("=")[0].strip()
- if key not in secrets:
- lines_to_write.append(line.strip())
-
- # Save each line to .env. The last line should be saved without a new line symbol
- for i, line in enumerate(lines_to_write):
- if i < len(lines_to_write) - 1:
- env_file.write(f"{line}\n")
- else:
- env_file.write(line)
- except Exception as e:
- print(f"Error: {e}")
+ delete_secrets_from_env(path_to_compose_files, secrets)
if make_change_in_worker_poller:
# Load docker-compose-worker-poller.yaml to dictionary and filter out secrets destined for deletion
@@ -248,11 +264,9 @@ def delete_secrets(
)
# Save updated docker-compose-worker-poller.yaml configuration
- with open(
- os.path.join(path_to_compose_files, DOCKER_COMPOSE_WORKER_POLLER),
- "w",
- ) as file:
- yaml.dump(worker_poller_file, file, default_flow_style=False)
+ save_to_yaml_file(
+ path_to_compose_files, DOCKER_COMPOSE_WORKER_POLLER, worker_poller_file
+ )
if make_change_in_traps:
# Load docker-compose-traps.yaml to dictionary and filter out secrets destined for deletion
@@ -266,10 +280,41 @@ def delete_secrets(
)
# Save updated docker-compose-traps.yaml configuration
- with open(
- os.path.join(path_to_compose_files, DOCKER_COMPOSE_TRAPS), "w"
- ) as file:
- yaml.dump(traps_file, file, default_flow_style=False)
+ save_to_yaml_file(path_to_compose_files, DOCKER_COMPOSE_TRAPS, traps_file)
+
+
+def delete_secrets_from_env(path_to_compose_files, secrets):
+ try:
+ # Read lines from .env
+ with open(os.path.join(path_to_compose_files, ".env")) as env_file:
+ lines = env_file.readlines()
+
+ with open(os.path.join(path_to_compose_files, ".env"), "w") as env_file:
+ lines_to_write = []
+ # If the environmental variable is NOT one of the secrets destined for deletion, add them to lines_to_write
+ for line in lines:
+ key = line.split("=")[0].strip()
+ if key not in secrets:
+ lines_to_write.append(line.strip())
+
+ # Save each line to .env. The last line should be saved without a new line symbol
+ for i, line in enumerate(lines_to_write):
+ if i < len(lines_to_write) - 1:
+ env_file.write(f"{line}\n")
+ else:
+ env_file.write(line)
+ except Exception as e:
+ print(f"Error: {e}")
+
+
+def load_compose_secrets(path_to_compose_files, secrets):
+ # Load docker-compose-secrets.yaml file to a dictionary and delete desired secrets
+ with open(os.path.join(path_to_compose_files, DOCKER_COMPOSE_SECRETS)) as file:
+ secrets_file = yaml.load(file, Loader=yaml.FullLoader)
+ for secret in secrets:
+ if secret in secrets_file["secrets"]:
+ del secrets_file["secrets"][secret]
+ return secrets_file
def main():
diff --git a/docs/architecture/design.md b/docs/architecture/design.md
new file mode 100644
index 000000000..dc4a9bcac
--- /dev/null
+++ b/docs/architecture/design.md
@@ -0,0 +1,33 @@
+# Architecture
+
+SC4SNMP is deployed using a Kubernetes distribution, typically MicroK8s,
+that's designed to be a low-touch experience for integration with sensitive
+edge network devices. It will typically be deployed in the same network
+management zone as the monitored devices and separated from Splunk by an
+existing firewall.
+
+![image](../images/sc4snmp_deployment.png)
+
+
+## High-level Design
+
+SC4SNMP has two main purposes: collecting SNMP data from network devices according to
+planned schedules, and listening for SNMP traps sent by those devices.
+
+![image](../images/sc4snmp_architecture.png)
+
+The diagram above presents the high-level architecture of Splunk Connect for SNMP. It contains the following components:
+
+- **UI** - the user interface for configuring SC4SNMP profiles, groups, and inventory. It applies changes to
+ SC4SNMP by creating the inventory job.
+- **Poller** - responsible for collecting selected data from SNMP agents at scheduled intervals. Celery is used for
+ planning the schedules and executing the incoming tasks, with Redis acting as the message broker.
+- **Trap** - responsible for listening for and receiving trap notifications from SNMP agents. The listener
+ continuously waits for messages arriving on the specified port and passes them to the trap worker for further
+ processing.
+- **MIB Server** - responsible for serving MIBs to SNMP Workers and translating OIDs to varbinds.
+- **MongoDB** - used for storing the configuration and state of SC4SNMP.
+- **Inventory** - a job that updates the SC4SNMP configuration information. It runs after every update to
+ the `values.yaml` file if polling is enabled.
+- **Sender** - responsible for sending data received from the poller or trap workers to Splunk HEC or OTel (SignalFx).
+
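+A quick way to see these components in a running MicroK8s deployment is to list the pods (a sketch, assuming the default `sc4snmp` namespace):
+
+```
+microk8s kubectl -n sc4snmp get pods
+```
+
+The trap, scheduler, worker (trap, poller, sender), mibserver, MongoDB, and Redis components should each appear as a pod in the output.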
diff --git a/docs/planning.md b/docs/architecture/planning.md
similarity index 79%
rename from docs/planning.md
rename to docs/architecture/planning.md
index f2d0b236c..0d51a2bac 100644
--- a/docs/planning.md
+++ b/docs/architecture/planning.md
@@ -4,16 +4,6 @@ Splunk Connect for SNMP (SC4SNMP) is a solution that allows the customer
to get data from network devices and appliances when a more feature-complete solution, such as the Splunk Universal Forwarder, is not
available.
-## Architecture
-
-SC4SNMP is deployed using a Kubernetes distribution, typically MicroK8s,
-that's designed to be a low-touch experience for integration with sensitive
-edge network devices. It will typically be deployed in the same network
-management zone as the monitored devices and separated from Splunk by an
-existing firewall.
-
-![image](images/sc4snmp_deployment.png)
-
## Requirements
- A supported deployment of MicroK8s
@@ -30,8 +20,7 @@ existing firewall.
SC4SNMP to the Splunk destination.
- Splunk Enterprise/Cloud 8.x or newer and/or Splunk Infrastructure Monitoring
- (SignalFx)
-
+ (SignalFx)
+
## Planning Infrastructure
diff --git a/docs/bestpractices.md b/docs/bestpractices.md
deleted file mode 100644
index aa0c97786..000000000
--- a/docs/bestpractices.md
+++ /dev/null
@@ -1,258 +0,0 @@
-# Debug Splunk Connect for SNMP
-
-## Check when SNMP WALK was executed last time for the device
-1. [Configure Splunk OpenTelemetry Collector for Kubernetes](gettingstarted/sck-installation.md)
-2. Go to your Splunk and execute search: `index="em_logs" "Sending due task" "sc4snmp;;walk"`
-and replace with the pertinent IP Address.
-
-## Installing Splunk Connect for SNMP on Linux RedHat
-Installation of RedHat may be blocking ports required by microk8s. Installing microk8s on RedHat
-requires checking to see if the firewall is not blocking any of the [required microk8s ports](https://microk8s.io/docs/ports).
-
-### Accessing SC4SNMP logs
-
-SC4SNMP logs can be browsed in Splunk in `em_logs` index, provided that [sck-otel](gettingstarted/sck-installation.md)
-is installed. Logs can be also accessed directly in kubernetes using terminal.
-
-#### Accessing logs via Splunk
-If [sck-otel](gettingstarted/sck-installation.md) is installed, browse `em_logs` index. Logs can be further filtered
-for example by the sourcetype field. Example search command to get logs from poller:
-```
-index=em_logs sourcetype="kube:container:splunk-connect-for-snmp-worker-poller"
-```
-
-#### Accessing logs in kubernetes
-To access logs directly in kubernetes, first run `microk8s kubectl -n sc4snmp get pods`. This will output all pods:
-```
-NAME READY STATUS RESTARTS AGE
-snmp-splunk-connect-for-snmp-worker-trap-99f49c557-j9jwx 1/1 Running 0 29m
-snmp-splunk-connect-for-snmp-trap-56f75f9754-kmlgb 1/1 Running 0 29m
-snmp-splunk-connect-for-snmp-scheduler-7bb8c79855-rgjkj 1/1 Running 0 29m
-snmp-mibserver-784bd599fd-6xzfj 1/1 Running 0 29m
-snmp-splunk-connect-for-snmp-worker-poller-78b46d668f-59mv4 1/1 Running 0 29m
-snmp-splunk-connect-for-snmp-worker-sender-6f8496bfbf-cvt9l 1/1 Running 0 29m
-snmp-mongodb-7579dc7867-mlnst 2/2 Running 0 29m
-snmp-redis-master-0 1/1 Running 0 29m
-```
-
-Now select the desired pod and run `microk8s kubectl -n sc4snmp logs pod/` command. Example command to retrieve
-logs from `splunk-connect-for-snmp-worker-poller`:
-```
-microk8s kubectl -n sc4snmp logs pod/snmp-splunk-connect-for-snmp-worker-poller-78b46d668f-59mv4
-```
-
-## Issues
-
-### "Empty SNMP response message" problem
-If you see the following line in the worker's logs:
-
-```log
-[2022-01-04 11:44:22,553: INFO/ForkPoolWorker-1] Task splunk_connect_for_snmp.snmp.tasks.walk[8e62fc62-569c-473f-a765-ff92577774e5] retry: Retry in 3489s: SnmpActionError('An error of SNMP isWalk=True for a host 192.168.10.20 occurred: Empty SNMP response message')
-```
-that causes an infinite retry of the walk operation. Add `worker.ignoreEmptyVarbinds` parameter to `values.yaml` and set it to true.
-
-An example configuration for a worker in `values.yaml` is:
-
-```yaml
-worker:
- ignoreEmptyVarbinds: true
-```
-
-### "OID not increasing" problem
-In case you see the following line in worker's logs:
-
-```log
-[2022-01-04 11:44:22,553: INFO/ForkPoolWorker-1] Task splunk_connect_for_snmp.snmp.tasks.walk[8e62fc62-569c-473f-a765-ff92577774e5] retry: Retry in 3489s: SnmpActionError('An error of SNMP isWalk=True for a host 192.168.10.20 occurred: OID not increasing')
-```
-that causes infinite retry of walk operation, add `worker.ignoreNotIncreasingOid` array to `values.yaml` and fill with the addresses of hosts where the problem appears.
-
-An example configuration for a worker in `values.yaml` is:
-
-```yaml
-worker:
- ignoreNotIncreasingOid:
- - "127.0.0.1:164"
- - "127.0.0.6"
-```
-
-If you put in only the IP address (for example, `127.0.0.1`), then errors will be ignored for all of its devices (like `127.0.0.1:161`,
-`127.0.0.1:163`...). If you put the IP address and host as `{host}:{port}`, that means the error will be ignored only for this device.
-
-### Walking a device takes too much time
-
-See [Configure small walk profile](../configuration/configuring-profiles/#walk-profile) to enable the small walk functionality.
-
-### An error of SNMP isWalk=True blocks traffic on the SC4SNMP instance
-
-If you see many `An error of SNMP isWalk=True` errors in your logs, that means that there is a connection problem with the hosts you're polling from.
-Walk will retry multiple times, which will eventually cause a worker to be blocked while it retries. In that case, you might want to limit
-the maximum retry time. You can do this by setting the variable `worker.walkRetryMaxInterval`, for example:
-
-```yaml
-worker:
- walkRetryMaxInterval: 60
-```
-
-With the previous configuration, 'walk' will retry exponentially from 30 seconds until it reaches 60 seconds. The default value for `worker.walkRetryMaxInterval` is 180.
-
-### SNMP Rollover
-The Rollover problem is due to a finite stored integer value (especially when the value is 32-bit).
-When it reaches its maximum, it gets rolled down to 0 again. This causes a strange drop in Analytics data.
-The most common case of this issue is interface speed on high speed ports. As a solution to this problem, SNMPv2 SMI defined a new object type, counter64, for 64-bit counters, see https://www.cisco.com/c/en/us/support/docs/ip/simple-network-management-protocol-snmp/26007-faq-snmpcounter.html.
-Not all the devices support it, but if they do, poll the counter64 type OID instead of the counter32 one.
-For example, use `ifHCInOctets` instead of `ifInOctets`.
-
-If 64-bit counter is not supported on your device, you can write your own Splunk queries that calculate the shift based on
-the maximum integer value and the current state. The same works for values large enough that they don't fit into a 64-bit value.
-An example for an appropriate Splunk query would be the following:
-
-```
-| streamstats current=f last(ifInOctets) as p_ifInOctets last(ifOutOctets) as p_ifOutOctets by ifAlias
-| eval in_delta=(ifInOctets - p_ifInOctets)
-| eval out_delta=(ifOutOctets - p_ifOutOctets)
-| eval max=pow(2,64)
-| eval out = if(out_delta<0,((max+out_delta)*8/(5*60*1000*1000*1000)),(out_delta)*8/(5*60*1000*1000*1000))
-| timechart span=5m avg(in) AS in, avg(out) AS out by ifAlias
-```
-
-### Polling authentication errors
-
-#### Unknown USM user
-In case of polling SNMPv3 devices, `Unknown USM user` error suggests wrong username. Verify
-that the kubernetes secret with the correct username has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)).
-
-#### Wrong SNMP PDU digest
-In case of polling SNMPv3 devices, `Wrong SNMP PDU digest` error suggests wrong authentication key. Verify
-that the kubernetes secret with the correct authentication key has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)).
-
-#### No SNMP response received before timeout
-`No SNMP response received before timeout` error might have several root causes. Some of them are:
-
-- wrong device IP or port
-- SNMPv2c wrong community string
-- SNMPv3 wrong privacy key
-
-### "Field is immutable" error during helm upgrade
-
-```
-microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/charts/splunk-connect-for-snmp/ --namespace=sc4snmp --create-namespace
-Error: UPGRADE FAILED: cannot patch "snmp-splunk-connect-for-snmp-inventory" with kind Job: Job.batch "snmp-splunk-connect-for-snmp-inventory" is invalid: (...) : field is immutable
-```
-
-The immutable error is due to the limitation placed on an inventory job. As the SC4SNMP requires several checks before applying updates, it is designed to allow changes in the inventory task after 5 minutes.
-
-The status of the inventory can be checked with the following command:
-```
-microk8s kubectl -n sc4snmp get pods | grep inventory
-```
-If the command is not empty, wait and execute it again after the inventory job finishes. This is when it is no longer visible in the output.
-
-If the changes are required to be applied immediately, the previous inventory job can be deleted with the following command:
-```
-microk8s kubectl delete job/snmp-splunk-connect-for-snmp-inventory -n sc4snmp
-```
-The upgrade command can be executed again.
-
-### "The following profiles have invalid configuration" or "The following groups have invalid configuration" errors
-Following errors are examples of wrong configuration:
-```
-The following groups have invalid configuration and won't be used: ['group1']. Please check indentation and keywords spelling inside mentioned groups configuration.
-```
-```
-The following profiles have invalid configuration and won't be used: ['standard_profile', 'walk_profile']. Please check indentation and keywords spelling inside mentioned profiles configuration.
-```
-Errors above indicate, that the mentioned groups or profiles might have wrong indentation or some keywords were omitted or misspelled. Refer to [Configuring profiles](./configuration/configuring-profiles.md)
-or [Configuring Groups](./configuration/configuring-groups.md) sections to check how the correct configuration should look like.
-
-### Identifying Traps issues
-
-#### Wrong IP or port
-The first possible answer to why traps are not sent to Splunk is that SNMP agents send trap messages to the wrong IP
-address or port. To check what is the correct address of traps server, run the following command:
-
-```
-microk8s kubectl -n sc4snmp get services
-```
-
-This command should output similar data:
-```
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-snmp-redis-headless ClusterIP None 6379/TCP 113s
-snmp-mibserver ClusterIP 10.152.183.163 80/TCP 113s
-snmp-mongodb ClusterIP 10.152.183.118 27017/TCP 113s
-snmp-redis-master ClusterIP 10.152.183.61 6379/TCP 113s
-snmp-mongodb-metrics ClusterIP 10.152.183.50 9216/TCP 113s
-snmp-splunk-connect-for-snmp-trap LoadBalancer 10.152.183.190 114.241.233.134 162:32180/UDP 113s
-```
-
-Check the `EXTERNAL-IP` of `snmp-splunk-connect-for-snmp-trap` and the second port number for this service. In this case
-the full `snmp-splunk-connect-for-snmp-trap` address will be `114.241.233.134:32180`.
-
-
-In case agents send traps to the correct address, but there is still no data in the `netops` index, there might be some
-issues with credentials. These errors can be seen in logs of the `snmp-splunk-connect-for-snmp-trap` pod.
-
-#### Unknown SNMP community name encountered
-In case of using community string for authentication purposes, the following error should be expected if the arriving trap
-has a community string not configured in SC4SNMP:
-```
-2024-02-06 15:42:14,885 ERROR Security Model failure for device ('18.226.181.199', 42514): Unknown SNMP community name encountered
-```
-
-If this error occurs, check if the appropriate community is defined under `traps.communities` in `values.yaml`. See the
-following example of a `public` community configuration:
-```yaml
-traps:
- communities:
- public:
- communityIndex:
- contextEngineId:
- contextName:
- tag:
- securityName:
-```
-
-#### Unknown SNMP security name encountered
-
-While sending SNMP v3 traps in case of wrong username or engine id configuration, the following error should be expected:
-```
-2024-02-06 15:42:14,091 ERROR Security Model failure for device ('18.226.181.199', 46066): Unknown SNMP security name encountered
-```
-
-If this error occurs, verify that the kubernetes secret with the correct username has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)).
-After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. Check that the correct snmp engine id
-is configured under `traps.securityEngineId`. See the following example of a `values.yaml` with configured secret and engine id:
-```yaml
-traps:
- usernameSecrets:
- - my-secret-name
- securityEngineId:
- - "090807060504030201"
-```
-
-#### Authenticator mismatched
-
-While sending SNMP v3 traps in case of wrong authentication protocol or password configuration, the following error should be expected:
-```
-2024-02-06 15:42:14,642 ERROR Security Model failure for device ('18.226.181.199', 54806): Authenticator mismatched
-```
-If this error occurs, verify that the kubernetes secret with the correct authentication protocol and password has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)).
-After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. See the following example of a `values.yaml` with configured secret:
-```yaml
-traps:
- usernameSecrets:
- - my-secret-name
-```
-
-#### Ciphering services not available or ciphertext is broken
-While sending SNMP v3 traps in case of wrong privacy protocol or password configuration, the following error should be expected:
-```
-2024-02-06 15:42:14,780 ERROR Security Model failure for device ('18.226.181.199', 48249): Ciphering services not available or ciphertext is broken
-```
-If this error occurs, verify that the kubernetes secret with the correct privacy protocol and password has been created ([SNMPv3 configuration](configuration/snmpv3-configuration.md)).
-After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. See the following example of a `values.yaml` with configured secret:
-```yaml
-traps:
- usernameSecrets:
- - my-secret-name
-```
diff --git a/docs/configuration/poller-configuration.md b/docs/configuration/poller-configuration.md
index 0e58897f0..ffe032528 100644
--- a/docs/configuration/poller-configuration.md
+++ b/docs/configuration/poller-configuration.md
@@ -31,8 +31,8 @@ poller:
NOTE: The header's line (`address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete`) is necessary for the correct execution of SC4SNMP. Do not remove it.
### Define log level
-The log level for poller can be set by changing the value for the key `logLevel`. The allowed values are: `DEBUG`, `INFO`, `WARNING`, `ERROR`.
-The default value is `WARNING`.
+The log level for the poller can be set by changing the value of the `logLevel` key. The allowed values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`, or `FATAL`.
+The default value is `INFO`.
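+
+A minimal sketch of this setting in `values.yaml`, assuming you want verbose output while debugging:
+
+```yaml
+poller:
+  logLevel: "DEBUG"
+```
+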
### Define usernameSecrets
Secrets are required to run SNMPv3 polling. To add v3 authentication details, create the k8s Secret object: [SNMPv3 Configuration](snmpv3-configuration.md), and put its name in `poller.usernameSecrets`.
diff --git a/docs/configuration/trap-configuration.md b/docs/configuration/trap-configuration.md
index e321615fa..3f5eb2e58 100644
--- a/docs/configuration/trap-configuration.md
+++ b/docs/configuration/trap-configuration.md
@@ -94,6 +94,13 @@ See the following example:
traps:
loadBalancerIP: 10.202.4.202
```
+If you have enabled IPv6, you need to pass IP addresses for both IPv4 and IPv6.
+See the following example:
+
+```yaml
+traps:
+ loadBalancerIP: 10.202.4.202,2001:0DB8:AC10:FE01:0000:0000:0000:0001
+```
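+
+When IPv6 is enabled for a Kubernetes deployment, the trap service's IP families have to be configured accordingly. A sketch, assuming a dual-stack cluster:
+
+```yaml
+traps:
+  ipFamilyPolicy: RequireDualStack
+  ipFamilies:
+    - IPv4
+    - IPv6
+```
+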
If you want to use the SC4SNMP trap receiver in K8S cluster, configure `NodePort` instead. Use the following configuration:
diff --git a/docs/configuration/values-params-description.md b/docs/configuration/values-params-description.md
index 8d06ba782..9fd069a5f 100644
--- a/docs/configuration/values-params-description.md
+++ b/docs/configuration/values-params-description.md
@@ -63,6 +63,9 @@ Detailed documentation about configuring sim can be found in [Splunk Infrastruct
| `autoscaling.maxReplicas` | Maximum number of running pods when autoscaling is enabled | |
| `autoscaling.targetCPUUtilizationPercentage` | CPU % threshold that must be exceeded on pods to spawn another replica | |
| `autoscaling.targetMemoryUtilizationPercentage` | Memory % threshold that must be exceeded on pods to spawn another replica | |
+| `podAntiAffinity` | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) | `soft` |
+| `nodeSelector` | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) | |
+
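+For example, to enforce hard anti-affinity for sim pods and pin them to labeled nodes (a sketch; the `splunk/role` node label is hypothetical):
+
+```yaml
+sim:
+  podAntiAffinity: hard
+  nodeSelector:
+    splunk/role: sim
+```
+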
## Scheduler
@@ -161,10 +164,14 @@ Detailed documentation about configuring traps can be found in [Traps](../config
| `service.usemetallb` | Enables using metallb | `true` |
| `service.metallbsharingkey` | Sets metallb.universe.tf/allow-shared-ip annotation in trap service | `splunk-connect` |
| `service.type` | [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) | `LoadBalancer` |
-| `service.port` | Port of the service to use | `162` |
+| `service.port` | Port of the service to use for IPv4 | `162` |
| `service.nodePort` | Port when the `service.type` is `nodePort` | `30000` |
| `service.externalTrafficPolicy` | Controls how Kubernetes routes traffic | `Local` |
-| `loadBalancerIP` | Sets loadBalancer IP address in the metallb pool | |
+| `service.ipv6Port` | Port of the service to use for IPv6 | `2163` |
+| `service.ipv6NodePort` | Port when the `service.type` is `nodePort` and IPv6 is enabled | `30003` |
+| `loadBalancerIP` | Sets loadBalancer IP address in the metallb pool | |
+| `ipFamilyPolicy` | Specifies if the service is dual stack or single stack | `SingleStack` |
+| `ipFamilies` | Defines the address families used for chosen `ipFamilyPolicy` | `IPv4` |
| `resources` | CPU and memory limits and requests for pod | |
| `autoscaling.enabled` | Enables autoscaling for pods | `false` |
| `autoscaling.minReplicas` | Minimum number of running pods when autoscaling is enabled | `1` |
diff --git a/docs/dashboard.md b/docs/dashboard.md
new file mode 100644
index 000000000..0df053cf0
--- /dev/null
+++ b/docs/dashboard.md
@@ -0,0 +1,60 @@
+# Dashboard
+
+The dashboard lets you monitor SC4SNMP and make sure that it is healthy and working correctly.
+
+
+## Prerequisites
+
+!!! info
+ The dashboard is compatible with SC4SNMP 1.11+.
+
+1. [Create metrics indexes](gettingstarted/splunk-requirements.md#requirements-for-splunk-enterprise-or-enterprise-cloud) in Splunk.
+2. Enable metrics logging for your runtime:
+ * For K8S, install [Splunk OpenTelemetry Collector for K8S](gettingstarted/sck-installation.md)
+ * For docker-compose, use [Splunk logging driver for docker](dockercompose/9-splunk-logging.md)
+
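+Before installing the dashboard, you can verify that SC4SNMP logs are reaching Splunk. A sketch of such a search, assuming the default `em_logs` index and a Kubernetes deployment:
+
+```
+index=em_logs sourcetype="kube:container:splunk-connect-for-snmp-worker-poller"
+```
+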
+## Install dashboard
+
+1. In Splunk platform open **Search -> Dashboards**.
+2. Click on **Create New Dashboard** and create an empty dashboard. Be sure to choose Classic Dashboards.
+3. In the **Edit Dashboard** view, go to Source and replace the initial XML with the contents of **dashboard.xml**. You can find this file among the attachments for your version on the [release page](https://github.com/splunk/splunk-connect-for-snmp/releases).
+4. Save your changes. Your dashboard is ready to use.
+
+
+## Metrics explanation
+
+### Polling dashboards
+
+To check that polling on your device is working correctly, first check the **SNMP schedule of polling tasks** dashboard.
+This chart shows when SC4SNMP last scheduled polling for your SNMP device. The process is healthy if it runs regularly.
+
+After confirming that SC4SNMP scheduled polling tasks for your SNMP device, make sure that polling itself is working.
+For that, look at the **SNMP polling status** dashboard: if everything is working, you will see only the **succeeded** polling status.
+If something is going wrong, you will also see other statuses (as on the screenshot); in that case, use the [troubleshooting docs](troubleshooting/polling-issues.md).
+
+![Polling dashboards](images/dashboard/polling_dashboard.png)
+
+### Walk dashboards
+
+To check that walk on your device is working correctly, first check the **SNMP schedule of walk tasks** dashboard.
+This chart shows when SC4SNMP last scheduled a walk for your SNMP device. The process is healthy if it runs regularly.
+
+After confirming that SC4SNMP scheduled walk tasks for your SNMP device, make sure that the walk itself is working.
+For that, look at the **SNMP walk status** dashboard: if everything is working, you will see only the **succeeded** walk status.
+If something is going wrong, you will also see other statuses (as on the screenshot); in that case, use the [troubleshooting docs](troubleshooting/polling-issues.md).
+
+![Walk dashboards](images/dashboard/walk_dashboard.png)
+
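+You can also cross-check the last scheduled walk directly in the logs, as described in the troubleshooting docs (a sketch; fill in the pertinent device IP between the semicolons):
+
+```
+index="em_logs" "Sending due task" "sc4snmp;;walk"
+```
+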
+### Trap dashboards
+
+First, check the **SNMP traps authorisation** dashboard. If you see only the **succeeded** status, authorisation is configured correctly; otherwise, use the [troubleshooting docs](troubleshooting/traps-issues.md#identifying-traps-issues).
+
+Once there are no trap authorisation issues, check that trap tasks are working correctly. Go to the **SNMP trap status** dashboard: if it shows only the **succeeded** status, everything is working; otherwise, entries with other statuses will appear.
+
+![Trap dashboards](images/dashboard/trap_dashboard.png)
+
+### Other dashboards
+
+There are also tasks that run as callbacks after walk and poll. For example, **send** publishes results to Splunk. After a successful walk or poll, these callbacks should also finish successfully. Check that these tasks show only the **succeeded** status.
+
+![Other dashboards](images/dashboard/other_dashboard.png)
\ No newline at end of file
diff --git a/docs/dockercompose/10-enable-ipv6.md b/docs/dockercompose/10-enable-ipv6.md
new file mode 100644
index 000000000..6d73ea32a
--- /dev/null
+++ b/docs/dockercompose/10-enable-ipv6.md
@@ -0,0 +1,17 @@
+# Enabling IPv6 for SC4SNMP
+
+The default installation of SC4SNMP does not support polling or receiving trap notifications from IPv6 addresses.
+To enable IPv6, follow the instructions below.
+
+## Docker
+
+Older versions of Docker do not support IPv6 or have known issues with IPv6 configuration.
+To avoid problems with configuring the network, it is recommended to use the latest version of Docker.
+
+To enable IPv6 for SC4SNMP, set the `IPv6_ENABLED` variable to `true` in the `.env` file.
+The default subnet used for the SC4SNMP network in Docker is `fd02::/64`; this and other network settings can be
+changed in the `docker-compose-network.yaml` file.
+
+The default trap port for IPv6 notifications is `2163`. If needed, you can change it to any other port with the `IPv6_TRAPS_PORT` parameter in the `.env` file.
+
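+For example, the relevant `.env` entries might look like this (a sketch; `2163` is the default IPv6 trap port):
+
+```
+IPv6_ENABLED=true
+IPv6_TRAPS_PORT=2163
+```
+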
+For more information about IPv6 networking in Docker, you can check the [official Docker documentation](https://docs.docker.com/engine/daemon/ipv6/).
\ No newline at end of file
diff --git a/docs/dockercompose/6-env-file-configuration.md b/docs/dockercompose/6-env-file-configuration.md
index 283b88af7..e3c3caa72 100644
--- a/docs/dockercompose/6-env-file-configuration.md
+++ b/docs/dockercompose/6-env-file-configuration.md
@@ -14,6 +14,8 @@ Inside the directory with the docker compose files, there is a `.env`. Variables
| `COREFILE_ABS_PATH` | Absolute path to Corefile used by coreDNS. Default Corefile can be found inside the `docker_compose` |
| `COREDNS_ADDRESS` | IP address of the coredns inside docker network. Shouldn’t be changed |
| `SC4SNMP_VERSION` | Version of SC4SNMP |
+| `IPv6_ENABLED` | Enable receiving traps and polling from IPv6 devices |
+
## Images of dependencies
@@ -30,22 +32,22 @@ Inside the directory with the docker compose files, there is a `.env`. Variables
## Splunk instance
-| Variable | Description |
-|-------------------------------------|-----------------------------------------------------------------------------------------------------------------------|
-| `SPLUNK_HEC_HOST` | IP address or a domain name of a Splunk instance to send data to |
-| `SPLUNK_HEC_PROTOCOL` | The protocol of the HEC endpoint: `https` or `http` |
-| `SPLUNK_HEC_PORT` | The port of the HEC endpoint |
-| `SPLUNK_HEC_TOKEN` | Splunk HTTP Event Collector token |
-| `SPLUNK_HEC_INSECURESSL` | Whether to skip checking the certificate of the HEC endpoint when sending data over HTTPS |
-| `SPLUNK_SOURCETYPE_TRAPS` | Splunk sourcetype for trap events |
-| `SPLUNK_SOURCETYPE_POLLING_EVENTS` | Splunk sourcetype for non-metric polling events |
-| `SPLUNK_SOURCETYPE_POLLING_METRICS` | Splunk sourcetype for metric polling events |
-| `SPLUNK_HEC_INDEX_EVENTS` | Name of the Splunk event index |
-| `SPLUNK_HEC_INDEX_METRICS` | Name of the Splunk metrics index |
-| `SPLUNK_HEC_PATH` | Path for the HEC endpoint |
-| `SPLUNK_AGGREGATE_TRAPS_EVENTS` | When set to true makes traps events collected as one event inside splunk |
-| `IGNORE_EMPTY_VARBINDS` | Details can be found in [empty snmp response message issue](../bestpractices.md#empty-snmp-response-message-problem) |
-| `SPLUNK_LOG_INDEX` | Event index in Splunk where logs from docker containers would be sent |
+| Variable | Description |
+|-------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------|
+| `SPLUNK_HEC_HOST` | IP address or a domain name of a Splunk instance to send data to |
+| `SPLUNK_HEC_PROTOCOL` | The protocol of the HEC endpoint: `https` or `http` |
+| `SPLUNK_HEC_PORT` | The port of the HEC endpoint |
+| `SPLUNK_HEC_TOKEN` | Splunk HTTP Event Collector token |
+| `SPLUNK_HEC_INSECURESSL` | Whether to skip checking the certificate of the HEC endpoint when sending data over HTTPS |
+| `SPLUNK_SOURCETYPE_TRAPS` | Splunk sourcetype for trap events |
+| `SPLUNK_SOURCETYPE_POLLING_EVENTS` | Splunk sourcetype for non-metric polling events |
+| `SPLUNK_SOURCETYPE_POLLING_METRICS` | Splunk sourcetype for metric polling events |
+| `SPLUNK_HEC_INDEX_EVENTS` | Name of the Splunk event index |
+| `SPLUNK_HEC_INDEX_METRICS` | Name of the Splunk metrics index |
+| `SPLUNK_HEC_PATH` | Path for the HEC endpoint |
+| `SPLUNK_AGGREGATE_TRAPS_EVENTS` | When set to true makes traps events collected as one event inside splunk |
+| `IGNORE_EMPTY_VARBINDS` | Details can be found in [empty snmp response message issue](../troubleshooting/polling-issues.md#empty-snmp-response-message-problem) |
+| `SPLUNK_LOG_INDEX` | Event index in Splunk where logs from docker containers would be sent |
## Workers
@@ -110,6 +112,7 @@ Inside the directory with the docker compose files, there is a `.env`. Variables
|------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `SNMP_V3_SECURITY_ENGINE_ID` | SNMPv3 TRAPs require the configuration SNMP Engine ID of the TRAP sending application for the USM users table of the TRAP receiving application for each USM user, for example: SNMP_V3_SECURITY_ENGINE_ID=80003a8c04,aab123456 |
| `TRAPS_PORT` | External port exposed for traps server |
+| `IPv6_TRAPS_PORT` | External port exposed for traps server for IPv6 |
## Scheduler
diff --git a/docs/gettingstarted/enable-ipv6.md b/docs/gettingstarted/enable-ipv6.md
new file mode 100644
index 000000000..954f7bb41
--- /dev/null
+++ b/docs/gettingstarted/enable-ipv6.md
@@ -0,0 +1,65 @@
+# Enabling IPv6 for SC4SNMP
+
+The default installation of SC4SNMP does not support polling or receiving trap notifications from IPv6 addresses. To enable IPv6, follow the instructions below.
+
+## Microk8s
+To configure a dual-stack network on microk8s, follow the instructions on the [Microk8s page](https://microk8s.io/docs/how-to-dual-stack).
+After completing those steps, follow the instructions in [Microk8s installation on Ubuntu](mk8s/k8s-microk8s.md#microk8s-installation-on-ubuntu)
+to install microk8s.
+
+## Calico
+The default CNI used by microk8s is Calico. For pods to be able to reach the internet over IPv6, you need to enable
+the `natOutgoing` parameter in the Calico IPv6 IP pool configuration.
+To set it, create a yaml file with the following content:
+```
+# calico-ippool.yaml
+---
+apiVersion: crd.projectcalico.org/v1
+kind: IPPool
+metadata:
+ name: default-ipv6-ippool
+spec:
+ natOutgoing: true
+```
+You can check the default name of the IPv6 IP pool with the `microk8s kubectl get ippools -n kube-system` command. If it differs from `default-ipv6-ippool`, change the name in the yaml file accordingly.
+Then apply the configuration with the following command:
+```
+microk8s kubectl apply -f calico-ippool.yaml
+```
+
+After those changes, restart microk8s with the following commands for them to take effect:
+```
+microk8s stop
+microk8s start
+```
+
+## Metallb
+As of microk8s version `1.30`, the Metallb add-on does not support passing IPv6 addresses in the enable command. To
+add IPv6 addresses to your Metallb configuration, prepare a yaml file like the one below:
+```
+# addresspool.yaml
+---
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: default-addresspool
+ namespace: metallb-system
+spec:
+ addresses:
+ - 1.1.1.1/32
+ - 2001:0db8:ac10:fe01:0000:0000:0000:0001/128
+```
+You can check the default name of the IP address pool created in Metallb with the `microk8s kubectl get ipaddresspool -n metallb-system` command. If it differs from `default-addresspool`, change the name in the yaml file accordingly.
+You can add single IPs or subnets, for both IPv4 and IPv6, under the `spec.addresses` section. After preparing the yaml file, apply the configuration with the following command:
+```
+microk8s kubectl apply -f addresspool.yaml
+```
+
+## SC4SNMP
+To configure traps to receive notifications from both IPv4 and IPv6 addresses, add the following configuration to the `values.yaml` file:
+```
+traps:
+ ipFamilyPolicy: RequireDualStack
+ ipFamilies: ["IPv4", "IPv6"]
+```
+The default trap port for IPv6 notifications is `2163`. You can change it to any other port if needed with the `traps.service.ipv6Port` parameter.
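+
+For example, a sketch changing the IPv6 trap port (`2164` is an arbitrary illustrative value):
+```
+traps:
+  service:
+    ipv6Port: 2164
+```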
\ No newline at end of file
diff --git a/docs/gettingstarted/mk8s/k8s-microk8s-scaling.md b/docs/gettingstarted/mk8s/k8s-microk8s-scaling.md
new file mode 100644
index 000000000..2c1b0abc3
--- /dev/null
+++ b/docs/gettingstarted/mk8s/k8s-microk8s-scaling.md
@@ -0,0 +1,203 @@
+# Scaling SNMP with microk8s
+
+The following guide presents how to bootstrap master and replica nodes for a microk8s cluster and explores the options for scaling SC4SNMP.
+
+## Scaling criteria
+
+Below is a formula that can help decide when to scale the system.
+
+`2 * periodic_task_exec_time * inventory_size >= workers_count * task_period`
+
+where:
+
+* `inventory_size` - the number of items in the inventory (`values.yaml`).
+* `workers_count` - the number of `polling` / `walk` worker pods.
+* `task_period` - the `walk` / `polling` period (`values.yaml`).
+* `periodic_task_exec_time` - the execution time of a single `polling` / `walk` task (see the metric in the screenshot below).
+
+![task exec time](../../images/sc4snmp_task_execution.png)
+
+If the left side of the equation is higher, you need to scale up `workers_count` or increase `task_period`.
+
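+As a worked example with hypothetical numbers, take `inventory_size = 500`, `periodic_task_exec_time = 5` seconds, `workers_count = 10` and `task_period = 300` seconds:
+
+```
+2 * periodic_task_exec_time * inventory_size = 2 * 5 * 500 = 5000
+workers_count * task_period                  = 10 * 300    = 3000
+```
+
+Since `5000 >= 3000`, this deployment should add workers or increase the period.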
+
+## Make microk8s cluster
+
+### Bootstrap master node
+
+1. Set up the master node using the following [guide](./k8s-microk8s.md).
+
+2. Generate a joining token for the replicas:
+
+```bash
+microk8s add-node
+```
+
+3. After running `add-node`, a join command (like `microk8s join :25000/`) is printed on stdout; save it for the next step.
+
+### Bootstrap replica nodes
+
+1. Install microk8s on the replica node:
+
+```bash
+sudo snap install microk8s --classic --channel=1.30/stable
+sudo usermod -a -G microk8s $USER
+sudo chown -f -R $USER ~/.kube
+su - $USER
+```
+
+2. Running `add-node` on the **master node** prints on stdout the command that you need to run on your **replica node**:
+
+```bash
+microk8s join :25000/
+```
+
+3. Check that the replica joined the cluster:
+
+```bash
+microk8s kubectl get nodes
+```
+
+New nodes should appear:
+
+```bash
+NAME STATUS ROLES AGE VERSION
+i-05ecfbf799e480188 Ready <none> 25h v1.30.5
+i-0733cb329576e6c78 Ready <none> 25h v1.30.5
+i-0b27bcc06fc5c660e Ready <none> 25h v1.30.5
+```
+
+## Scaling SC4SNMP
+
+1. [Install SC4SNMP](../sc4snmp-installation.md) if it is not installed yet.
+
+2. Add a `worker` section to `values.yaml`:
+
+```yaml
+worker:
+ poller:
+ replicaCount: 4
+ trap:
+ replicaCount: 4
+ sender:
+ replicaCount: 4
+```
+
+3. Add `traps` replica count in `values.yaml`:
+
+```yaml
+traps:
+ replicaCount: 4
+```
+
+4. Redeploy SC4SNMP:
+
+```bash
+microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
+```
+
+5. Check that SC4SNMP has been scaled:
+
+```bash
+microk8s kubectl get pods -n sc4snmp
+```
+
+You should see 4 replicas of each worker type and of the traps service:
+
+```bash
+NAME READY STATUS RESTARTS AGE
+snmp-mibserver-5df74fb678-zkj9m 1/1 Running 0 25h
+snmp-mongodb-6dc5c4f74d-xg6p7 2/2 Running 0 25h
+snmp-redis-master-0 1/1 Running 0 25h
+snmp-splunk-connect-for-snmp-inventory-k9t87 0/1 Completed 0 3m
+snmp-splunk-connect-for-snmp-scheduler-76848cf748-57qbx 1/1 Running 0 25h
+snmp-splunk-connect-for-snmp-trap-9f55664c4-9dv7d 1/1 Running 0 3m1s
+snmp-splunk-connect-for-snmp-trap-9f55664c4-crgld 1/1 Running 0 3m1s
+snmp-splunk-connect-for-snmp-trap-9f55664c4-sb768 1/1 Running 0 25h
+snmp-splunk-connect-for-snmp-trap-9f55664c4-tkhcp 1/1 Running 0 3m1s
+snmp-splunk-connect-for-snmp-worker-poller-7487956697-4hvpl 1/1 Running 0 21h
+snmp-splunk-connect-for-snmp-worker-poller-7487956697-8bvnn 1/1 Running 0 3m1s
+snmp-splunk-connect-for-snmp-worker-poller-7487956697-9dfgt 1/1 Running 0 3m1s
+snmp-splunk-connect-for-snmp-worker-poller-7487956697-hlhvz 1/1 Running 0 24h
+snmp-splunk-connect-for-snmp-worker-sender-657589666f-979d2 1/1 Running 0 3m1s
+snmp-splunk-connect-for-snmp-worker-sender-657589666f-mrvg9 1/1 Running 0 3m1s
+snmp-splunk-connect-for-snmp-worker-sender-657589666f-qtcr8 1/1 Running 0 21h
+snmp-splunk-connect-for-snmp-worker-sender-657589666f-tc8sv 1/1 Running 0 24h
+snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-6fbs2 1/1 Running 0 24h
+snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-kdcdb 1/1 Running 0 3m1s
+snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-sfxvb 1/1 Running 0 3m
+snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-xmmwv 1/1 Running 0 21h
+```
+
+## Autoscaling SC4SNMP
+
+1. [Install SC4SNMP](../sc4snmp-installation.md) if it is not installed yet.
+
+2. Add autoscaling options to `values.yaml`:
+
+```yaml
+worker:
+ poller:
+ autoscaling:
+ enabled: true
+ minReplicas: 5
+ maxReplicas: 10
+ trap:
+ autoscaling:
+ enabled: true
+ minReplicas: 5
+ maxReplicas: 10
+ sender:
+ autoscaling:
+ enabled: true
+ minReplicas: 5
+ maxReplicas: 10
+
+traps:
+ autoscaling:
+ enabled: true
+ minReplicas: 5
+ maxReplicas: 10
+```
+
+3. Redeploy SC4SNMP:
+
+```bash
+microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/splunk-connect-for-snmp --namespace=sc4snmp --create-namespace
+```
+
+4. Check that SC4SNMP has scaled:
+
+```bash
+microk8s kubectl get po -n sc4snmp
+```
+
+After scaling, 5-10 instances of each worker type and of the traps service will appear:
+
+```bash
+NAME READY STATUS RESTARTS AGE
+snmp-mibserver-6fdcdf9ddd-7bvmj 1/1 Running 0 25h
+snmp-mongodb-6dc5c4f74d-6b7mf 2/2 Running 0 25h
+snmp-redis-master-0 1/1 Running 0 25h
+snmp-splunk-connect-for-snmp-inventory-sssgs 0/1 Completed 0 3m37s
+snmp-splunk-connect-for-snmp-scheduler-5fcb6dcb44-r79ff 1/1 Running 0 25h
+snmp-splunk-connect-for-snmp-trap-5788bc498c-62xsq 1/1 Running 0 2m10s
+snmp-splunk-connect-for-snmp-trap-5788bc498c-bmlhg 1/1 Running 0 2m10s
+snmp-splunk-connect-for-snmp-trap-5788bc498c-p7mkq 1/1 Running 0 2m10s
+snmp-splunk-connect-for-snmp-trap-5788bc498c-t8q9c 1/1 Running 0 2m10s
+snmp-splunk-connect-for-snmp-trap-5788bc498c-xjjp2 1/1 Running 0 24h
+snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-25tbf 1/1 Running 0 16m
+snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-dc6zr 1/1 Running 0 16m
+snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-g7vpr 1/1 Running 0 16m
+snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-gdkgq 1/1 Running 0 16m
+snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-pg6cj 1/1 Running 0 24h
+snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-56h9w 1/1 Running 0 24h
+snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-hr54w 1/1 Running 0 16m
+snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-j7wcn 1/1 Running 0 16m
+snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-sgsdg 0/1 Pending 0 16m
+snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-xrpfx 1/1 Running 0 16m
+snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-79x2l 0/1 Pending 0 16m
+snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-br7pf 1/1 Running 0 24h
+snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-cnmh9 0/1 Pending 0 16m
+snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-dhdgg 1/1 Running 0 16m
+snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-wcwq5 0/1 Pending 0 16m
+```
\ No newline at end of file
diff --git a/docs/gettingstarted/mk8s/k8s-microk8s.md b/docs/gettingstarted/mk8s/k8s-microk8s.md
index ba87eddd7..2df38aacb 100644
--- a/docs/gettingstarted/mk8s/k8s-microk8s.md
+++ b/docs/gettingstarted/mk8s/k8s-microk8s.md
@@ -19,6 +19,11 @@ Three node minimum per node:
The following quick start guidance is based on Ubuntu 20.04LTS with MicroK8s and internet access. See other deployment options
in the MicroK8s [documentation](https://microk8s.io/docs), including offline and with proxy.
+## Enabling IPv6
+
+If you plan to poll or receive trap notifications from IPv6 addresses, first check the instructions for [enabling
+IPv6](../enable-ipv6.md).
+
## Install MicroK8s using Snap
```bash
@@ -37,17 +42,6 @@ Wait for Installation of Mk8S to complete:
microk8s status --wait-ready
```
-## Add nodes (optional)
-
-* Repeat the steps above for each additional node (with a minimum of 3 nodes).
-* On the first node, use the following command to see the instructions to join:
-
-```bash
-microk8s add-node
-```
-
-* On each additional node, use the output from the command above.
-
## Install required services for SC4SNMP
The following commands can be issued from any one node in a cluster:
@@ -82,3 +76,7 @@ the same as the primary IP.
microk8s enable metallb
microk8s status --wait-ready
```
+
+## Add nodes (optional)
+
+If you need cluster mode, please use the following [guide](k8s-microk8s-scaling.md#make-microk8s-cluster).
\ No newline at end of file
diff --git a/docs/gettingstarted/sc4snmp-installation.md b/docs/gettingstarted/sc4snmp-installation.md
index ca8e9488c..8753c031f 100644
--- a/docs/gettingstarted/sc4snmp-installation.md
+++ b/docs/gettingstarted/sc4snmp-installation.md
@@ -5,6 +5,10 @@ for single node non-HA deployments. It does not have resource requests and limit
See the mongo, redis, scheduler, worker, and traps configuration sections for guidance
on production configuration.
+## Installing Splunk Connect for SNMP on Linux RedHat
+A default RedHat installation may block ports required by microk8s. Before installing microk8s on RedHat,
+check that the firewall is not blocking any of the [required microk8s ports](https://microk8s.io/docs/ports).
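+
+For example, with firewalld (the default firewall on RedHat) you can list the currently open ports and, if needed, open a required one; port `16443` (the microk8s API server port) is shown as an illustration:
+
+```
+sudo firewall-cmd --list-ports
+sudo firewall-cmd --permanent --add-port=16443/tcp
+sudo firewall-cmd --reload
+```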
+
## Installation process
@@ -180,7 +184,7 @@ index="netops" sourcetype="sc4snmp:event"
NOTE: Before polling starts, SC4SNMP must perform the SNMP WALK process on the device. It is run the first time after configuring the new device, and then during the run time in every `walk_interval`.
Its purpose is to gather all the data and provide meaningful context for the polling records. For example, it might report that your device is so large that the walk takes too long, so the scope of walking needs to be limited.
-In such cases, enable the small walk. See [walk takes too much time](../../bestpractices/#walking-a-device-takes-too-much-time).
+In such cases, enable the small walk. See [walk takes too much time](../../troubleshooting/polling-issues/#walking-a-device-takes-too-much-time).
When the walk finishes, events appear in Splunk.
## Next Steps
diff --git a/docs/images/dashboard/other_dashboard.png b/docs/images/dashboard/other_dashboard.png
new file mode 100644
index 000000000..2a32523a4
Binary files /dev/null and b/docs/images/dashboard/other_dashboard.png differ
diff --git a/docs/images/dashboard/polling_dashboard.png b/docs/images/dashboard/polling_dashboard.png
new file mode 100644
index 000000000..ea23840b0
Binary files /dev/null and b/docs/images/dashboard/polling_dashboard.png differ
diff --git a/docs/images/dashboard/trap_dashboard.png b/docs/images/dashboard/trap_dashboard.png
new file mode 100644
index 000000000..61e5cb2bd
Binary files /dev/null and b/docs/images/dashboard/trap_dashboard.png differ
diff --git a/docs/images/dashboard/walk_dashboard.png b/docs/images/dashboard/walk_dashboard.png
new file mode 100644
index 000000000..053b65c9a
Binary files /dev/null and b/docs/images/dashboard/walk_dashboard.png differ
diff --git a/docs/images/sc4snmp_architecture.png b/docs/images/sc4snmp_architecture.png
new file mode 100644
index 000000000..a79189d28
Binary files /dev/null and b/docs/images/sc4snmp_architecture.png differ
diff --git a/docs/images/sc4snmp_task_execution.png b/docs/images/sc4snmp_task_execution.png
new file mode 100644
index 000000000..50090ba8b
Binary files /dev/null and b/docs/images/sc4snmp_task_execution.png differ
diff --git a/docs/troubleshooting/configuring-logs.md b/docs/troubleshooting/configuring-logs.md
new file mode 100644
index 000000000..6e74cbd3e
--- /dev/null
+++ b/docs/troubleshooting/configuring-logs.md
@@ -0,0 +1,76 @@
+## Configuring SC4SNMP loglevel
+
+The SC4SNMP log level can be configured in the `values.yaml` file. The default value is `INFO`; the other
+possible levels are `DEBUG`, `WARNING`, `ERROR`, `CRITICAL` and `FATAL`. To change
+the log level for a specific component, add the following configuration to `values.yaml`:
+
+```yaml
+worker:
+ logLevel: "DEBUG"
+```
+
+Then redeploy SC4SNMP.
+
+Log level configuration can be set for `worker`, `poller`, `scheduler` and `traps`.
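+
+For example, a sketch setting levels for several components at once (assuming each component accepts the same `logLevel` key):
+
+```yaml
+scheduler:
+  logLevel: "WARNING"
+traps:
+  logLevel: "DEBUG"
+```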
+
+## Accessing SC4SNMP logs
+
+SC4SNMP logs can be browsed in Splunk in the `em_logs` index, provided that [sck-otel](../gettingstarted/sck-installation.md)
+is installed. Logs can also be accessed directly in kubernetes using the terminal.
+
+### Accessing logs via Splunk
+If [sck-otel](../gettingstarted/sck-installation.md) is installed, browse the `em_logs` index. Logs can be further filtered,
+for example by the sourcetype field. An example search to get logs from the poller:
+```
+index=em_logs sourcetype="kube:container:splunk-connect-for-snmp-worker-poller"
+```
+
+### Accessing logs in kubernetes
+To access logs directly in kubernetes, first run `microk8s kubectl -n sc4snmp get pods`. This will output all pods:
+```
+NAME READY STATUS RESTARTS AGE
+snmp-splunk-connect-for-snmp-worker-trap-99f49c557-j9jwx 1/1 Running 0 29m
+snmp-splunk-connect-for-snmp-trap-56f75f9754-kmlgb 1/1 Running 0 29m
+snmp-splunk-connect-for-snmp-scheduler-7bb8c79855-rgjkj 1/1 Running 0 29m
+snmp-mibserver-784bd599fd-6xzfj 1/1 Running 0 29m
+snmp-splunk-connect-for-snmp-worker-poller-78b46d668f-59mv4 1/1 Running 0 29m
+snmp-splunk-connect-for-snmp-worker-sender-6f8496bfbf-cvt9l 1/1 Running 0 29m
+snmp-mongodb-7579dc7867-mlnst 2/2 Running 0 29m
+snmp-redis-master-0 1/1 Running 0 29m
+```
+
+Now select the desired pod and run the `microk8s kubectl -n sc4snmp logs pod/<pod_name>` command. An example command to retrieve
+logs from `splunk-connect-for-snmp-worker-poller`:
+```
+microk8s kubectl -n sc4snmp logs pod/snmp-splunk-connect-for-snmp-worker-poller-78b46d668f-59mv4
+```
+
+### Accessing logs in docker
+
+Refer to [splunk logging](../dockercompose/9-splunk-logging.md) for instructions on how to enable logging in docker and
+send the logs to Splunk.
+
+To access logs directly in docker, first run `docker ps`. This will output all containers:
+
+```
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+afcd8f4850cd ghcr.io/splunk/splunk-connect-for-snmp/container:1.12.0-beta.1 "./entrypoint.sh cel…" 19 seconds ago Up 17 seconds docker_compose-worker-poller-1
+5cea46cee0cb ghcr.io/splunk/splunk-connect-for-snmp/container:1.12.0-beta.1 "./entrypoint.sh cel…" 19 seconds ago Up 17 seconds docker_compose-worker-sender-1
+1c5154c91191 ghcr.io/splunk/splunk-connect-for-snmp/container:1.12.0-beta.1 "./entrypoint.sh cel…" 19 seconds ago Up 17 seconds sc4snmp-scheduler
+8f6e60903780 ghcr.io/splunk/splunk-connect-for-snmp/container:1.12.0-beta.1 "./entrypoint.sh trap" 19 seconds ago Up 17 seconds 0.0.0.0:2163->2163/udp, :::2163->2163/udp, 0.0.0.0:162->2162/udp, [::]:162->2162/udp sc4snmp-traps
+f146802a0a8d ghcr.io/splunk/splunk-connect-for-snmp/container:1.12.0-beta.1 "./entrypoint.sh cel…" 19 seconds ago Up 16 seconds docker_compose-worker-poller-2
+70e0fe076cdf ghcr.io/splunk/splunk-connect-for-snmp/container:1.12.0-beta.1 "./entrypoint.sh cel…" 19 seconds ago Up 17 seconds docker_compose-worker-trap-2
+090cc957b600 ghcr.io/splunk/splunk-connect-for-snmp/container:1.12.0-beta.1 "./entrypoint.sh cel…" 19 seconds ago Up 16 seconds docker_compose-worker-trap-1
+24aac5c89d80 ghcr.io/pysnmp/mibs/container:latest "/bin/sh -c '/app/lo…" 19 seconds ago Up 18 seconds 8080/tcp snmp-mibserver
+a5bef5a5a02c bitnami/mongodb:6.0.9-debian-11-r5 "/opt/bitnami/script…" 19 seconds ago Up 18 seconds 27017/tcp mongo
+76f966236c1b bitnami/redis:7.2.1-debian-11-r0 "/opt/bitnami/script…" 19 seconds ago Up 18 seconds 6379/tcp redis
+163d880eaf8c coredns/coredns:1.11.1 "/coredns -conf /Cor…" 19 seconds ago Up 18 seconds 53/tcp, 53/udp coredns
+```
+
+Now select the desired container and run the `docker logs <container_name>` command.
+An example command to retrieve logs from `splunk-connect-for-snmp-worker-poller`:
+
+```
+docker logs docker_compose-worker-poller-1
+```
+
diff --git a/docs/troubleshooting/docker-commands.md b/docs/troubleshooting/docker-commands.md
new file mode 100644
index 000000000..25c4cc23b
--- /dev/null
+++ b/docs/troubleshooting/docker-commands.md
@@ -0,0 +1,255 @@
+## Docker commands
+
+A full list of docker commands and their usage can be found in the [docker documentation](https://docs.docker.com/reference/cli/docker/).
+Below are the most common commands used to troubleshoot issues with SC4SNMP.
+
+### Common flags
+The following are some common flags that can be used with the `docker` commands:
+
+- `-a` flag is used to list all resources
+
+For more flags and options, you can refer to the [docker documentation](https://docs.docker.com/reference/cli/docker/).
+
+### Accessing logs in docker
+
+Instructions on how to set up and access the logs can be found in [SC4SNMP logs](configuring-logs.md#accessing-logs-in-docker).
+
+### The ls and ps commands
+
+The `ls` and `ps` commands are used to list resources in docker. The following are examples of resources that
+can be listed using these commands:
+
+```
+docker compose ls
+docker network ls
+docker image ls
+docker container ls
+docker ps
+docker ps -a
+docker compose ps
+```
+
+### The inspect command
+
+The `inspect` command is used to get detailed information about resources in docker. The following are
+examples of resources that can be inspected:
+
+```
+docker inspect --type <resource_type> <name_or_id>
+docker network inspect <network_name>
+docker image inspect <image_name>
+```
+
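+For instance, to inspect one of the containers and the network used by the default SC4SNMP docker compose deployment (both names appear later on this page):
+
+```
+docker inspect --type container sc4snmp-traps
+docker network inspect sc4snmp_network
+```
+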
+### The logs command
+
+The `logs` command is used to get the logs of the resources in docker.
+The following are some examples of how to use the `logs` command:
+
+```
+docker logs <container_name>
+docker compose logs
+```
+
+### The exec command
+
+The `exec` command is used to execute a command in a running container. The following is an example of how to
+use the `exec` command:
+
+```
+docker exec -it <container_name> sh -c "<command>"
+```
+
+### The stats command
+
+The `stats` command is used to display the live resource usage statistics of a container. The following are some
+examples of how to use the `stats` command:
+
+```
+docker stats
+docker stats <container_name>
+```
+
+## Examples of command usage
+
+### Check secret for snmp v3
+
+One possible cause of snmp v3 issues is incorrectly configured secrets in docker.
+Below you can find instructions to check the existing secrets.
+
+To check the existing secrets:
+```
+~$ docker exec -it docker_compose-worker-poller-1 sh -c "ls secrets/snmpv3"
+my_secret
+```
+To get more details about one secret, use the following command:
+```
+~$ docker exec -it docker_compose-worker-poller-1 sh -c "ls secrets/snmpv3/my_secret"
+authKey authProtocol contextEngineId privKey privProtocol userName
+```
+Replace **my_secret** with the name of the secret you want to check and **docker_compose-worker-poller-1** with the name of the container.
+
+To see the configured details of the secret:
+```
+~$ docker exec -it docker_compose-worker-poller-1 sh -c 'cd secrets/snmpv3/my_secret && for file in *; do echo "$file= $(cat $file)"; done'
+authKey= admin1234
+authProtocol= SHA
+contextEngineId= 80003a8c04
+privKey= admin1234
+privProtocol= AES
+userName= r-wuser
+```
+Replace **my_secret** with the name of the secret you want to check and **docker_compose-worker-poller-1** with the name of the container.
+
+### Check containers health
+To check the health of the containers, you can use the `ps` command and look at the `STATUS` column.
+If the `STATUS` is not `Up`, or a container restarts continuously, there might be an issue with it.
+You can also use the `inspect` command to get more detailed information about the container and see if there are any
+errors or warnings in its `state`, or use the `logs` command to see the logs of the container.
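+
+For example, a quick way to check a single container's state (a sketch using the `sc4snmp-traps` container name from the default deployment):
+
+```
+docker inspect --format '{{.State.Status}}' sc4snmp-traps
+```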
+
+### Check resource usage
+To check the resource usage of the containers, you can use the `stats` command.
+It shows the CPU and memory usage of the containers in real time, which you can compare with the values
+assigned in the `resources` section of the configuration yaml.
+If the usage is close to the assigned limits, consider increasing the resources.
+
+### Check network
+Checking the network configuration can be useful when enabling dual-stack for SC4SNMP.
+
+One useful command to check the network configuration is:
+```
+~$ docker network ls
+NETWORK ID NAME DRIVER SCOPE
+7e46b3818089 bridge bridge local
+1401c370b8f4 docker_gwbridge bridge local
+12ca971fa954 host host local
+rssypcqbwarx ingress overlay swarm
+b6c176852f41 none null local
+978e06ffcd4a sc4snmp_network bridge local
+```
+This command shows all the networks configured in docker. The network created for sc4snmp is named `sc4snmp_network` by default.
+
+To see the details of a configured network, use:
+```
+~$ docker network inspect sc4snmp_network
+[
+ {
+ "Name": "sc4snmp_network",
+ "Id": "978e06ffcd4a49de5cd78a038050530342a029b1b1a1f1967254f701ae5ff1a0",
+ "Created": "2024-10-10T11:38:01.627727666Z",
+ "Scope": "local",
+ "Driver": "bridge",
+ "EnableIPv6": false,
+ "IPAM": {
+ "Driver": "default",
+ "Options": null,
+ "Config": [
+ {
+ "Subnet": "172.28.0.0/16",
+ "Gateway": "172.28.0.1"
+ },
+ {
+ "Subnet": "fd02::/64",
+ "Gateway": "fd02::1"
+ }
+ ]
+ },
+ "Internal": false,
+ "Attachable": false,
+ "Ingress": false,
+ "ConfigFrom": {
+ "Network": ""
+ },
+ "ConfigOnly": false,
+ "Containers": {
+ "231b21c24bd722d684349174cc5aebf40cf294617aa98741a4af1269ed930fcc": {
+ "Name": "docker_compose-worker-poller-1",
+ "EndpointID": "0195750f0539535615ebdb24d8ee7eb967d31ca3c86a0d5d4b5c21f907cb61a0",
+ "MacAddress": "02:42:ac:1c:00:0b",
+ "IPv4Address": "172.28.0.11/16",
+ "IPv6Address": ""
+ },
+ "25479e15afee663a7d0ad7b97f734f65d35672c49e9610f9e0406975d616e584": {
+ "Name": "snmp-mibserver",
+ "EndpointID": "68a27a27fc5acc7b1350cb5f073abf9218f1c0fa4ede5f037a67fdcce46ec91b",
+ "MacAddress": "02:42:ac:1c:00:03",
+ "IPv4Address": "172.28.0.3/16",
+ "IPv6Address": ""
+ },
+ "35f2bdd191898f7186a0c00dbffa5cc700e9d72e07efb6f3b341c6b8ce14d5f5": {
+ "Name": "coredns",
+ "EndpointID": "0c76c32e9b9b1dd033141332dee9a8f954c4a83ea5344ee4c93af057d2523d9a",
+ "MacAddress": "02:42:ac:1c:00:ff",
+ "IPv4Address": "172.28.0.255/16",
+ "IPv6Address": ""
+ },
+ "3dc9f0d293578a7aca1b6b33cc3557f82262849e2be488a9cda729152854b9a9": {
+ "Name": "docker_compose-worker-trap-2",
+ "EndpointID": "88fc3701b04803d6317ad5d23031f880ec96c2206185c1994184580932ed5865",
+ "MacAddress": "02:42:ac:1c:00:0c",
+ "IPv4Address": "172.28.0.12/16",
+ "IPv6Address": ""
+ },
+ "43c5893f2688da599dd0331a328937b19a62496f4eb06eaa40a9cad8e879c567": {
+ "Name": "redis",
+ "EndpointID": "c1c91866f67ed76d83e78a6b11e5001b0cf65107df3b7d4733373653be7f5e6a",
+ "MacAddress": "02:42:ac:1c:00:04",
+ "IPv4Address": "172.28.0.4/16",
+ "IPv6Address": ""
+ },
+ "52fa13245149422e559d4ff7a2f6c929b46ebfffdbafb52efcaade26e861128e": {
+ "Name": "sc4snmp-traps",
+ "EndpointID": "926187b2e4c3e9753dd260e8fa9db2745c20ed6c87f73f2df4870f0cb3be1511",
+ "MacAddress": "02:42:ac:1c:00:05",
+ "IPv4Address": "172.28.0.5/16",
+ "IPv6Address": ""
+ },
+ "68813263e9d6a74e70061f85f9044ec334cce9aee364804566b4823e6960ae04": {
+ "Name": "docker_compose-worker-poller-2",
+ "EndpointID": "06d883d0ee21926be450b8c0adf4c31da7f13ceaa70dba3d0830608d5c192b2d",
+ "MacAddress": "02:42:ac:1c:00:08",
+ "IPv4Address": "172.28.0.8/16",
+ "IPv6Address": ""
+ },
+ "78b04a7cd5c9ec1d3aaf014fd10c0ad89d401ad63093052a26111066198639af": {
+ "Name": "docker_compose-worker-sender-1",
+ "EndpointID": "0e9c84d4e7d1ce6362bba33c41161086a2de4623161a0ef34ce746d9983a4be7",
+ "MacAddress": "02:42:ac:1c:00:09",
+ "IPv4Address": "172.28.0.9/16",
+ "IPv6Address": ""
+ },
+ "a34c808997eb56ab5c4043be3d9cd5ceb86f5b0f481b7bd51009eace9ff12965": {
+ "Name": "mongo",
+ "EndpointID": "992f5fd3eed5e646c250d61cc1d3c94bf43dc2ad0621f0044dbfd718d24325d5",
+ "MacAddress": "02:42:ac:1c:00:02",
+ "IPv4Address": "172.28.0.2/16",
+ "IPv6Address": ""
+ },
+ "b197d6b5ac9a0a69d8afb9a613006e916eacffd4c3a2c71e3ee8db927c307457": {
+ "Name": "sc4snmp-scheduler",
+ "EndpointID": "3753aec5d05a24683fb04f29284297444957e466fd5d5ffc6f40f8b58d04c443",
+ "MacAddress": "02:42:ac:1c:00:07",
+ "IPv4Address": "172.28.0.7/16",
+ "IPv6Address": ""
+ },
+ "b52716b229679ec14fcc3236eee4e64f6f2b2c257889979ebb7d4b091c8cd0db": {
+ "Name": "docker_compose-worker-trap-1",
+ "EndpointID": "f1066da76315c595b6bd606e2f0437b16ec33b2c16e3f659682910e6a79ecb24",
+ "MacAddress": "02:42:ac:1c:00:0a",
+ "IPv4Address": "172.28.0.10/16",
+ "IPv6Address": ""
+ }
+ },
+ "Options": {},
+ "Labels": {
+ "com.docker.compose.network": "sc4snmp_network",
+ "com.docker.compose.project": "docker_compose",
+ "com.docker.compose.version": "2.29.7"
+ }
+ }
+]
+```
+
+One section of the output shows the `Containers` assigned to that network with their IPv4 and IPv6 addresses.
+The output also shows whether IPv6 is enabled and which subnets are assigned to the network.
+
diff --git a/docs/troubleshooting/k8s-commands.md b/docs/troubleshooting/k8s-commands.md
new file mode 100644
index 000000000..aea96a9f7
--- /dev/null
+++ b/docs/troubleshooting/k8s-commands.md
@@ -0,0 +1,267 @@
+## Kubectl commands
+
+A full list of kubernetes commands and their usage can be found in the [kubectl documentation](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands).
+Below are the most common commands used to troubleshoot issues with SC4SNMP.
+
+### Common flags
+The following are some common flags that can be used with the `kubectl` commands:
+
+- `-A` flag is used to list all resources in all namespaces
+- `-n` flag is used to specify the namespace of the resource
+- `-f` flag is used to specify the file that contains the resource definition
+- `-o` flag is used to specify the output format of the command
+
+For more flags and options, you can refer to the [kubectl documentation](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands).
+
+### Accessing logs in kubernetes
+
+Instructions on how to set up and access the logs can be found in [SC4SNMP logs](configuring-logs.md#accessing-logs-in-kubernetes).
+
+### The get command
+The `get` command is used to list one or more resources of a selected type. The following are some examples of how to use the `get` command:
+```
+microk8s kubectl get all
+microk8s kubectl get pods
+microk8s kubectl get svc
+microk8s kubectl get deployments
+microk8s kubectl get events
+microk8s kubectl get nodes
+microk8s kubectl get configmaps
+microk8s kubectl get secrets
+microk8s kubectl get ippools
+```
+
+For example, to list all pods running in the sc4snmp namespace, you can use:
+```
+~$ microk8s kubectl get pods -n sc4snmp
+NAME READY STATUS RESTARTS AGE
+snmp-mibserver-95df967b9-cjhvz 1/1 Running 1 (5h13m ago) 27h
+snmp-mongodb-6dc5c4f74d-pxpxb 2/2 Running 2 (5h13m ago) 27h
+snmp-redis-master-0 1/1 Running 1 (5h13m ago) 25h
+snmp-splunk-connect-for-snmp-scheduler-7c675d7dd7-6ql2g 1/1 Running 2 (5h13m ago) 27h
+snmp-splunk-connect-for-snmp-trap-755b58b8c5-kg5f4 1/1 Running 1 (5h13m ago) 27h
+snmp-splunk-connect-for-snmp-trap-755b58b8c5-r8szq 1/1 Running 1 (5h13m ago) 27h
+snmp-splunk-connect-for-snmp-worker-poller-5956f6dfb4-rs7mv 1/1 Running 1 (5h13m ago) 27h
+snmp-splunk-connect-for-snmp-worker-poller-5956f6dfb4-wjxb6 1/1 Running 1 (5h13m ago) 27h
+snmp-splunk-connect-for-snmp-worker-sender-76f5d49478-spvp2 1/1 Running 1 (5h13m ago) 27h
+snmp-splunk-connect-for-snmp-worker-trap-5c4dbf8889-4njg2 1/1 Running 1 (5h13m ago) 27h
+snmp-splunk-connect-for-snmp-worker-trap-5c4dbf8889-5hc6j 1/1 Running 1 (5h13m ago) 27h
+```
+
+### The describe command
+The `describe` command is used to get detailed information about a resource. The following are some examples of how to use the `describe` command:
+```
+microk8s kubectl describe all
+microk8s kubectl describe pod
+microk8s kubectl describe svc
+microk8s kubectl describe deployment
+microk8s kubectl describe events
+microk8s kubectl describe node
+microk8s kubectl describe configmap
+microk8s kubectl describe secret
+microk8s kubectl describe ippool
+```
+
+For example, to get detailed information about a service, you can use:
+```
+~$ microk8s kubectl describe svc/snmp-splunk-connect-for-snmp-trap -n sc4snmp
+Name: snmp-splunk-connect-for-snmp-trap
+Namespace: sc4snmp
+Labels: app.kubernetes.io/instance=snmp
+ app.kubernetes.io/managed-by=Helm
+ app.kubernetes.io/name=splunk-connect-for-snmp-trap
+ app.kubernetes.io/version=1.11.0
+ helm.sh/chart=splunk-connect-for-snmp-1.11.0
+Annotations: meta.helm.sh/release-name: snmp
+ meta.helm.sh/release-namespace: sc4snmp
+ metallb.universe.tf/allow-shared-ip: splunk-connect
+Selector: app.kubernetes.io/instance=snmp,app.kubernetes.io/name=splunk-connect-for-snmp-trap
+Type: LoadBalancer
+IP Family Policy: SingleStack
+IP Families: IPv4
+IP: 10.153.183.151
+IPs: 10.153.183.151
+IP: 34.207.186.189
+LoadBalancer Ingress: 34.207.186.189
+Port: snmp-udp 162/UDP
+TargetPort: 2162/UDP
+NodePort: snmp-udp 31810/UDP
+Endpoints: 10.3.209.194:2162,10.3.209.210:2162
+Session Affinity: None
+External Traffic Policy: Local
+HealthCheck NodePort: 31789
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal nodeAssigned 95s (x45 over 3h30m) metallb-speaker announcing from node "ip-172-31-18-142" with protocol "layer2"
+```
+
+### The exec command
+
+The `exec` command is used to execute a command in a running container. The following is the general form of the `exec` command:
+```
+microk8s kubectl exec -it <pod_name> -n <namespace> -- <command>
+```
+
+For example, to open a shell in a container, you can use:
+```
+~$ microk8s kubectl exec -it snmp-mibserver-95df967b9-cjhvz -n sc4snmp -- /bin/bash
+I have no name!@snmp-mibserver-95df967b9-cjhvz:/app$
+```
+
+### The top command
+
+The `top` command is used to display resource (CPU/memory) usage. It can be used as
+follows:
+```
+microk8s kubectl top nodes
+microk8s kubectl top pods
+```
+
+For example, to display the resource usage of pods, you can use:
+```
+~$ microk8s kubectl top pods
+NAME CPU(cores) MEMORY(bytes)
+sck-splunk-otel-collector-agent-jrl62 34m 209Mi
+sck-splunk-otel-collector-k8s-cluster-receiver-5c56564cf5-ks2zb 3m 99Mi
+```
+
+
+## Examples of command usage
+
+### Check secret for snmp v3
+
+One possible cause of snmp v3 issues is incorrectly configured secrets in kubernetes.
+Below you can find instructions to check the existing secrets and decode their values.
+
+To check the existing secrets:
+```
+~$ microk8s kubectl get secret -n sc4snmp
+NAME TYPE DATA AGE
+sh.helm.release.v1.snmp.v1 helm.sh/release.v1 1 23h
+sh.helm.release.v1.snmp.v2 helm.sh/release.v1 1 21h
+splunk-connect-for-snmp-splunk Opaque 1 23h
+testing1 Opaque 6 68m
+```
+To get more details about one secret, use the following command:
+```
+~$ microk8s kubectl describe secret/testing1 -n sc4snmp
+Name: testing1
+Namespace: sc4snmp
+Labels: <none>
+Annotations: <none>
+
+Type: Opaque
+
+Data
+====
+privProtocol: 3 bytes
+securityName: 7 bytes
+userName: 8 bytes
+authKey: 10 bytes
+authProtocol: 3 bytes
+privKey: 10 bytes
+```
+Secret values in kubernetes are not shown by the `describe` command. To fully see them, you have to decode them.
+Below are some methods to do that:
+
+- With json query:
+```
+~$ microk8s kubectl get secrets/testing1 -n sc4snmp -o json | jq '.data | map_values(@base64d)'
+{
+ "authKey": "testing123",
+ "authProtocol": "MD5",
+ "privKey": "testing123",
+ "privProtocol": "AES",
+ "securityName": "testing",
+ "userName": "testing1"
+}
+```
+
+- With template:
+```
+~$ microk8s kubectl get secrets/testing1 -n sc4snmp --template='{{ range $key, $value := .data }}{{ printf "%s: %s\n" $key ($value | base64decode) }}{{ end }}'
+authKey: testing123
+authProtocol: MD5
+privKey: testing123
+privProtocol: AES
+securityName: testing
+userName: testing1
+```
+
+You can also check [this](https://stackoverflow.com/questions/56909180/decoding-kubernetes-secret) thread for different decoding methods.
+
+
+### Check pods health
+To check the health of the pods, you can use the `get` command and look at the `STATUS` and `RESTARTS` columns.
+If the `STATUS` is not `Running`, or `RESTARTS` is not `0`, there might be an issue with the pod.
+You can also use the `describe` command to get more detailed information about the pod and see if there are any errors or warnings in its `Events`.
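+
+For example, to see the detailed state and events of one of the pods listed earlier (the pod name is illustrative):
+
+```
+microk8s kubectl describe pod/snmp-redis-master-0 -n sc4snmp
+```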
+
+### Check resource usage
+To check the resource usage of the nodes and pods, you can use the `top` command.
+It shows the CPU and memory usage of the nodes and pods, which you can compare with the values
+assigned in the `resources` section of the configuration yaml.
+If the usage is close to the assigned limits, consider increasing the resources.
+
+### Check network
+Checking the network configuration can be useful when enabling dual-stack for SC4SNMP.
+The default network controller used by microk8s is `calico`.
+
+One useful command to check the network configuration is:
+```
+~$ microk8s kubectl describe daemonset/calico-node -n kube-system
+(...)
+ Environment:
+ DATASTORE_TYPE: kubernetes
+ WAIT_FOR_DATASTORE: true
+ NODENAME: (v1:spec.nodeName)
+ CALICO_NETWORKING_BACKEND: Optional: false
+ CLUSTER_TYPE: k8s,bgp
+ IP: autodetect
+ IP_AUTODETECTION_METHOD: first-found
+ CALICO_IPV4POOL_VXLAN: Always
+ IP6_AUTODETECTION_METHOD: first-found
+ CALICO_IPV6POOL_CIDR: fd02::/64
+ IP6: autodetect
+ CALICO_IPV6POOL_VXLAN: Always
+ FELIX_IPINIPMTU: Optional: false
+ FELIX_VXLANMTU: Optional: false
+ FELIX_WIREGUARDMTU: Optional: false
+ CALICO_IPV4POOL_CIDR: 10.3.0.0/16
+ CALICO_DISABLE_FILE_LOGGING: true
+ FELIX_DEFAULTENDPOINTTOHOSTACTION: ACCEPT
+ FELIX_IPV6SUPPORT: true
+ FELIX_HEALTHENABLED: true
+ FELIX_FEATUREDETECTOVERRIDE: ChecksumOffloadBroken=true
+(...)
+```
+One section of the output shows the `Environment` variables used by the `calico` network controller.
+From them, we can check whether the different IP versions are enabled and whether their pools are
+configured with a subnet.
+
+Another useful command to check when having connectivity issues is:
+```
+~$ microk8s kubectl describe service/webhook-service -n metallb-system
+Name: webhook-service
+Namespace: metallb-system
+Labels: <none>
+Annotations: <none>
+Selector: component=controller
+Type: ClusterIP
+IP Family Policy: SingleStack
+IP Families: IPv4
+IP: 10.153.183.249
+IPs: 10.153.183.249
+Port: 443/TCP
+TargetPort: 9443/TCP
+Endpoints: 10.3.209.208:9443
+Session Affinity: None
+```
+`Metallb` is the network load-balancer used by SC4SNMP.
+By checking the service configuration, we can see the IP assigned to the service and the port it is listening on.
+When having issues with the dual-stack configuration, check the `IP Family Policy` and `IP Families` fields.
+
+### Check service configuration
+
+Checking the service configuration can be useful when having issues with traps connectivity.
+For a detailed explanation, refer to [Wrong IP or port](traps-issues.md#wrong-ip-or-port).
diff --git a/docs/troubleshooting/polling-issues.md b/docs/troubleshooting/polling-issues.md
new file mode 100644
index 000000000..5a6cfb1b8
--- /dev/null
+++ b/docs/troubleshooting/polling-issues.md
@@ -0,0 +1,132 @@
+# Identifying Polling and Walk Issues
+
+## Check when SNMP WALK was executed last time for the device
+1. [Configure Splunk OpenTelemetry Collector for Kubernetes](../gettingstarted/sck-installation.md) or [Configure Docker Logs for Splunk](../dockercompose/9-splunk-logging.md)
+2. Go to your Splunk and execute the search: `index="em_logs" "Sending due task" "sc4snmp;<ip_address>;walk"`,
+replacing `<ip_address>` with the pertinent IP address.
+
+## "Empty SNMP response message" problem
+If you see the following line in the worker's logs:
+
+```log
+[2022-01-04 11:44:22,553: INFO/ForkPoolWorker-1] Task splunk_connect_for_snmp.snmp.tasks.walk[8e62fc62-569c-473f-a765-ff92577774e5] retry: Retry in 3489s: SnmpActionError('An error of SNMP isWalk=True for a host 192.168.10.20 occurred: Empty SNMP response message')
+```
+it causes an infinite retry of the walk operation. To fix it, add the `worker.ignoreEmptyVarbinds` parameter to `values.yaml` and set it to `true`.
+
+An example configuration for a worker in `values.yaml` is:
+
+```yaml
+worker:
+ ignoreEmptyVarbinds: true
+```
+
+## "OID not increasing" problem
+In case you see the following line in the worker's logs:
+
+```log
+[2022-01-04 11:44:22,553: INFO/ForkPoolWorker-1] Task splunk_connect_for_snmp.snmp.tasks.walk[8e62fc62-569c-473f-a765-ff92577774e5] retry: Retry in 3489s: SnmpActionError('An error of SNMP isWalk=True for a host 192.168.10.20 occurred: OID not increasing')
+```
+it causes an infinite retry of the walk operation. To fix it, add the `worker.ignoreNotIncreasingOid` array to `values.yaml` and fill it with the addresses of hosts where the problem appears.
+
+An example configuration for a worker in `values.yaml` is:
+
+```yaml
+worker:
+ ignoreNotIncreasingOid:
+ - "127.0.0.1:164"
+ - "127.0.0.6"
+```
+
+If you put in only the IP address (for example, `127.0.0.1`), errors will be ignored for all of its devices (like `127.0.0.1:161`,
+`127.0.0.1:163`...). If you put in the IP address and port as `{host}:{port}`, the error will be ignored only for this specific device.
+
+## Walking a device takes too much time
+
+See [Configure small walk profile](../configuration/configuring-profiles.md#walk-profile) to enable the small walk
+functionality.
+
+## An error of SNMP isWalk=True blocks traffic on the SC4SNMP instance
+
+If you see many `An error of SNMP isWalk=True` errors in your logs, it means that there is a connection problem
+with the hosts you are polling.
+The walk will retry multiple times, eventually blocking a worker while it retries. In that case, you might want to limit
+the maximum retry time by setting the `worker.walkRetryMaxInterval` variable, for example:
+
+```yaml
+worker:
+ walkRetryMaxInterval: 60
+```
+
+With the previous configuration, 'walk' will retry exponentially from 30 seconds until it reaches 60 seconds. The default value for `worker.walkRetryMaxInterval` is 180.
+
+## SNMP Rollover
+The rollover problem is due to a finite stored integer value (especially when the value is 32-bit).
+When it reaches its maximum, it rolls over to 0 again, which causes a strange drop in Analytics data.
+The most common case of this issue is interface speed on high speed ports. As a solution to this problem, SNMPv2 SMI defined a new object type, counter64, for 64-bit counters, see https://www.cisco.com/c/en/us/support/docs/ip/simple-network-management-protocol-snmp/26007-faq-snmpcounter.html.
+Not all the devices support it, but if they do, poll the counter64 type OID instead of the counter32 one.
+For example, use `ifHCInOctets` instead of `ifInOctets`.
+
+If the 64-bit counter is not supported on your device, you can write your own Splunk queries that calculate the shift based on
+the maximum integer value and the current state. The same approach works for values too large to fit into a 64-bit counter.
+An appropriate Splunk query would look like the following:
+
+```
+| streamstats current=f last(ifInOctets) as p_ifInOctets last(ifOutOctets) as p_ifOutOctets by ifAlias
+| eval in_delta=(ifInOctets - p_ifInOctets)
+| eval out_delta=(ifOutOctets - p_ifOutOctets)
+| eval max=pow(2,64)
+| eval out = if(out_delta<0,((max+out_delta)*8/(5*60*1000*1000*1000)),(out_delta)*8/(5*60*1000*1000*1000))
+| timechart span=5m avg(in) AS in, avg(out) AS out by ifAlias
+```
+
+## Polling authentication errors
+
+### Unknown USM user
+When polling SNMPv3 devices, an `Unknown USM user` error suggests a wrong username. Verify
+that the kubernetes secret with the correct username has been created ([SNMPv3 configuration](../configuration/snmpv3-configuration.md)).
+
+### Wrong SNMP PDU digest
+When polling SNMPv3 devices, a `Wrong SNMP PDU digest` error suggests a wrong authentication key. Verify
+that the kubernetes secret with the correct authentication key has been created ([SNMPv3 configuration](../configuration/snmpv3-configuration.md)).
+
+### No SNMP response received before timeout
+The `No SNMP response received before timeout` error might have several root causes. Some of them are:
+
+- wrong device IP or port
+- SNMPv2c wrong community string
+- SNMPv3 wrong privacy key
+
+## "Field is immutable" error during helm upgrade
+
+```
+microk8s helm3 upgrade --install snmp -f values.yaml splunk-connect-for-snmp/charts/splunk-connect-for-snmp/ --namespace=sc4snmp --create-namespace
+Error: UPGRADE FAILED: cannot patch "snmp-splunk-connect-for-snmp-inventory" with kind Job: Job.batch "snmp-splunk-connect-for-snmp-inventory" is invalid: (...) : field is immutable
+```
+
+The immutable error is due to a limitation placed on the inventory job. As SC4SNMP requires several checks before applying updates, it is designed to allow changes to the inventory task only after 5 minutes.
+
+The status of the inventory can be checked with the following command:
+```
+microk8s kubectl -n sc4snmp get pods | grep inventory
+```
+If the output is not empty, wait and execute the command again until the inventory job finishes, that is, until it is no longer visible in the output.
+
+If the changes must be applied immediately, the previous inventory job can be deleted with the following command:
+```
+microk8s kubectl delete job/snmp-splunk-connect-for-snmp-inventory -n sc4snmp
+```
+After that, the upgrade command can be executed again.
+
+## "The following profiles have invalid configuration" or "The following groups have invalid configuration" errors
+The following errors are examples of a wrong configuration:
+```
+The following groups have invalid configuration and won't be used: ['group1']. Please check indentation and keywords spelling inside mentioned groups configuration.
+```
+```
+The following profiles have invalid configuration and won't be used: ['standard_profile', 'walk_profile']. Please check indentation and keywords spelling inside mentioned profiles configuration.
+```
+The errors above indicate that the mentioned groups or profiles might have wrong indentation, or that some keywords were omitted or misspelled. Refer to the following sections to see how the correct configuration should look:
+
+- kubernetes: [Configuring profiles](../configuration/configuring-profiles.md) or [Configuring Groups](../configuration/configuring-groups.md)
+- docker: [Scheduler configuration](../dockercompose/4-scheduler-configuration.md)
\ No newline at end of file
diff --git a/docs/troubleshooting/traps-issues.md b/docs/troubleshooting/traps-issues.md
new file mode 100644
index 000000000..0df1f8fd4
--- /dev/null
+++ b/docs/troubleshooting/traps-issues.md
@@ -0,0 +1,92 @@
+# Identifying Traps issues
+
+## Wrong IP or port
+The first possible reason why traps are not sent to Splunk is that SNMP agents send trap messages to the wrong IP
+address or port. To check the correct address of the traps server, run the following command:
+
+```
+microk8s kubectl -n sc4snmp get services
+```
+
+This command should output similar data:
+```
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+snmp-redis-headless ClusterIP None <none> 6379/TCP 113s
+snmp-mibserver ClusterIP 10.152.183.163 <none> 80/TCP 113s
+snmp-mongodb ClusterIP 10.152.183.118 <none> 27017/TCP 113s
+snmp-redis-master ClusterIP 10.152.183.61 <none> 6379/TCP 113s
+snmp-mongodb-metrics ClusterIP 10.152.183.50 <none> 9216/TCP 113s
+snmp-splunk-connect-for-snmp-trap LoadBalancer 10.152.183.190 114.241.233.134 162:32180/UDP 113s
+```
+
+Check the `EXTERNAL-IP` of `snmp-splunk-connect-for-snmp-trap` and the second port number for this service. In this case
+the full `snmp-splunk-connect-for-snmp-trap` address will be `114.241.233.134:32180`.
+
+
+If agents send traps to the correct address but there is still no data in the `netops` index, there might be
+issues with credentials. These errors can be seen in the logs of the `snmp-splunk-connect-for-snmp-trap` pod.
+
+## Unknown SNMP community name encountered
+When using a community string for authentication, the following error should be expected if an arriving trap
+has a community string that is not configured in SC4SNMP:
+```
+2024-02-06 15:42:14,885 ERROR Security Model failure for device ('18.226.181.199', 42514): Unknown SNMP community name encountered
+```
+
+If this error occurs, check if the appropriate community is defined under `traps.communities` in `values.yaml`. See the
+following example of a `public` community configuration:
+```yaml
+traps:
+ communities:
+ public:
+ communityIndex:
+ contextEngineId:
+ contextName:
+ tag:
+ securityName:
+```
+
+## Unknown SNMP security name encountered
+
+When sending SNMP v3 traps with a wrong username or engine id configuration, the following error should be expected:
+```
+2024-02-06 15:42:14,091 ERROR Security Model failure for device ('18.226.181.199', 46066): Unknown SNMP security name encountered
+```
+
+If this error occurs, verify that the kubernetes secret with the correct username has been created ([SNMPv3 configuration](../configuration/snmpv3-configuration.md)).
+After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. Check that the correct snmp engine id
+is configured under `traps.securityEngineId`. See the following example of a `values.yaml` with configured secret and engine id:
+```yaml
+traps:
+ usernameSecrets:
+ - my-secret-name
+ securityEngineId:
+ - "090807060504030201"
+```
+
+## Authenticator mismatched
+
+When sending SNMP v3 traps with a wrong authentication protocol or password configuration, the following error should be expected:
+```
+2024-02-06 15:42:14,642 ERROR Security Model failure for device ('18.226.181.199', 54806): Authenticator mismatched
+```
+If this error occurs, verify that the kubernetes secret with the correct authentication protocol and password has been created ([SNMPv3 configuration](../configuration/snmpv3-configuration.md)).
+After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. See the following example of a `values.yaml` with configured secret:
+```yaml
+traps:
+ usernameSecrets:
+ - my-secret-name
+```
+
+## Ciphering services not available or ciphertext is broken
+When sending SNMP v3 traps with a wrong privacy protocol or password configuration, the following error should be expected:
+```
+2024-02-06 15:42:14,780 ERROR Security Model failure for device ('18.226.181.199', 48249): Ciphering services not available or ciphertext is broken
+```
+If this error occurs, verify that the kubernetes secret with the correct privacy protocol and password has been created ([SNMPv3 configuration](../configuration/snmpv3-configuration.md)).
+After creating the secret, add it under `traps.usernameSecrets` in `values.yaml`. See the following example of a `values.yaml` with configured secret:
+```yaml
+traps:
+ usernameSecrets:
+ - my-secret-name
+```
diff --git a/integration_tests/automatic_setup_compose.sh b/integration_tests/automatic_setup_compose.sh
index 2a795e776..0bc47fcfc 100755
--- a/integration_tests/automatic_setup_compose.sh
+++ b/integration_tests/automatic_setup_compose.sh
@@ -44,7 +44,7 @@ deploy_poetry() {
poetry install
poetry add --group dev splunk-sdk
poetry add --group dev splunklib
- poetry add --group dev pysnmp
+ poetry add --group dev pysnmplib
}
wait_for_containers_to_be_up() {
diff --git a/integration_tests/automatic_setup_microk8s.sh b/integration_tests/automatic_setup_microk8s.sh
index 1102b1c3c..50bbc8e88 100755
--- a/integration_tests/automatic_setup_microk8s.sh
+++ b/integration_tests/automatic_setup_microk8s.sh
@@ -44,7 +44,7 @@ deploy_poetry() {
poetry install
poetry add --group dev splunk-sdk
poetry add --group dev splunklib
- poetry add --group dev pysnmp
+ poetry add --group dev pysnmplib
}
wait_for_pod_initialization() {
diff --git a/integration_tests/deploy_and_test.sh b/integration_tests/deploy_and_test.sh
index b4f87ec7d..258ce4437 100755
--- a/integration_tests/deploy_and_test.sh
+++ b/integration_tests/deploy_and_test.sh
@@ -91,7 +91,7 @@ deploy_poetry() {
poetry install
poetry add -D splunk-sdk
poetry add -D splunklib
- poetry add -D pysnmp
+ poetry add -D pysnmplib
}
run_integration_tests() {
diff --git a/mkdocs.yml b/mkdocs.yml
index 7af1c79c3..d7561c56f 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -14,6 +14,9 @@ markdown_extensions:
- codehilite
- attr_list
- md_in_html
+ - admonition
+ - pymdownx.details
+ - pymdownx.superfences
plugins:
- search:
@@ -41,6 +44,8 @@ nav:
- Platform Microk8s: "gettingstarted/mk8s/k8s-microk8s.md"
- Install Splunk OpenTelemetry Collector for Kubernetes: "gettingstarted/sck-installation.md"
- Install SC4SNMP: "gettingstarted/sc4snmp-installation.md"
+ - Enable IPv6: "gettingstarted/enable-ipv6.md"
+ - Scaling with Microk8s: "gettingstarted/mk8s/k8s-microk8s-scaling.md"
- Configuration:
- Deployment: "configuration/deployment-configuration.md"
- Configurable values: "configuration/values-params-description.md"
@@ -78,12 +83,21 @@ nav:
- SNMPv3 secrets configuration: "dockercompose/7-snmpv3-secrets.md"
- Offline installation: "dockercompose/8-offline-installation.md"
- Sending logs to Splunk: "dockercompose/9-splunk-logging.md"
+ - Enable IPv6: "dockercompose/10-enable-ipv6.md"
- Lightweight installation: "small-environment.md"
- - Planning: "planning.md"
+ - Architecture:
+ - High-level design: "architecture/design.md"
+ - Infrastructure Planning: "architecture/planning.md"
- Security: "security.md"
- Request MIB: "mib-request.md"
- Upgrade SC4SNMP: "upgrade.md"
- - Troubleshooting: "bestpractices.md"
+ - Troubleshooting:
+ - Accessing and configuring logs: "troubleshooting/configuring-logs.md"
+ - Polling issues: "troubleshooting/polling-issues.md"
+ - Traps issues: "troubleshooting/traps-issues.md"
+ - Kubernetes commands: "troubleshooting/k8s-commands.md"
+ - Docker commands: "troubleshooting/docker-commands.md"
- Releases: "releases.md"
- - High Availability: ha.md
+ - High Availability: "ha.md"
- Improved polling performance: "improved-polling.md"
+ - Monitoring dashboard: "dashboard.md"
diff --git a/poetry.lock b/poetry.lock
index 6759d453b..faa6830c4 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -779,13 +779,13 @@ files = [
[[package]]
name = "mike"
-version = "2.1.2"
+version = "2.1.3"
description = "Manage multiple versions of your MkDocs-powered documentation"
optional = false
python-versions = "*"
files = [
- {file = "mike-2.1.2-py3-none-any.whl", hash = "sha256:d61d9b423ab412d634ca2bd520136d5114e3cc73f4bbd1aa6a0c6625c04918c0"},
- {file = "mike-2.1.2.tar.gz", hash = "sha256:d59cc8054c50f9c8a046cfd47f9b700cf9ff1b2b19f420bd8812ca6f94fa8bd3"},
+ {file = "mike-2.1.3-py3-none-any.whl", hash = "sha256:d90c64077e84f06272437b464735130d380703a76a5738b152932884c60c062a"},
+ {file = "mike-2.1.3.tar.gz", hash = "sha256:abd79b8ea483fb0275b7972825d3082e5ae67a41820f8d8a0dc7a3f49944e810"},
]
[package.dependencies]
@@ -804,13 +804,13 @@ test = ["coverage", "flake8 (>=3.0)", "flake8-quotes", "shtab"]
[[package]]
name = "mkdocs"
-version = "1.6.0"
+version = "1.6.1"
description = "Project documentation with Markdown."
optional = false
python-versions = ">=3.8"
files = [
- {file = "mkdocs-1.6.0-py3-none-any.whl", hash = "sha256:1eb5cb7676b7d89323e62b56235010216319217d4af5ddc543a91beb8d125ea7"},
- {file = "mkdocs-1.6.0.tar.gz", hash = "sha256:a73f735824ef83a4f3bcb7a231dcab23f5a838f88b7efc54a0eef5fbdbc3c512"},
+ {file = "mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e"},
+ {file = "mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2"},
]
[package.dependencies]
@@ -852,13 +852,13 @@ pyyaml = ">=5.1"
[[package]]
name = "mkdocs-material"
-version = "9.5.30"
+version = "9.5.40"
description = "Documentation that simply works"
optional = false
python-versions = ">=3.8"
files = [
- {file = "mkdocs_material-9.5.30-py3-none-any.whl", hash = "sha256:fc070689c5250a180e9b9d79d8491ef9a3a7acb240db0728728d6c31eeb131d4"},
- {file = "mkdocs_material-9.5.30.tar.gz", hash = "sha256:3fd417dd42d679e3ba08b9e2d72cd8b8af142cc4a3969676ad6b00993dd182ec"},
+ {file = "mkdocs_material-9.5.40-py3-none-any.whl", hash = "sha256:8e7a16ada34e79a7b6459ff2602584222f522c738b6a023d1bea853d5049da6f"},
+ {file = "mkdocs_material-9.5.40.tar.gz", hash = "sha256:b69d70e667ec51fc41f65e006a3184dd00d95b2439d982cb1586e4c018943156"},
]
[package.dependencies]
@@ -907,18 +907,21 @@ mkdocs = ">=1.1.0,<2"
[[package]]
name = "mongoengine"
-version = "0.28.2"
+version = "0.29.1"
description = "MongoEngine is a Python Object-Document Mapper for working with MongoDB."
optional = false
python-versions = ">=3.7"
files = [
- {file = "mongoengine-0.28.2-py3-none-any.whl", hash = "sha256:8e0f84a5ad3d335e5da98261454d4ab546c866241ed064adc6433fe2077d43c9"},
- {file = "mongoengine-0.28.2.tar.gz", hash = "sha256:67c35a2ebe0ee7fd8eda3766dc251b9e0aada4489bb935f7a55b4c570d148ca7"},
+ {file = "mongoengine-0.29.1-py3-none-any.whl", hash = "sha256:9302ec407dd60f47f62cc07684d9f6cac87f1e93283c54203851788104d33df4"},
+ {file = "mongoengine-0.29.1.tar.gz", hash = "sha256:3b43abaf2d5f0b7d39efc2b7d9e78f4d4a5dc7ce92b9889ba81a5a9b8dee3cf3"},
]
[package.dependencies]
pymongo = ">=3.4,<5.0"
+[package.extras]
+test = ["Pillow (>=7.0.0)", "blinker", "coverage", "pytest", "pytest-cov"]
+
[[package]]
name = "mongolock"
version = "1.3.4"
@@ -934,18 +937,18 @@ pymongo = ">=2.6.0"
[[package]]
name = "opentelemetry-api"
-version = "1.25.0"
+version = "1.27.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_api-1.25.0-py3-none-any.whl", hash = "sha256:757fa1aa020a0f8fa139f8959e53dec2051cc26b832e76fa839a6d76ecefd737"},
- {file = "opentelemetry_api-1.25.0.tar.gz", hash = "sha256:77c4985f62f2614e42ce77ee4c9da5fa5f0bc1e1821085e9a47533a9323ae869"},
+ {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"},
+ {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"},
]
[package.dependencies]
deprecated = ">=1.2.6"
-importlib-metadata = ">=6.0,<=7.1"
+importlib-metadata = ">=6.0,<=8.4.0"
[[package]]
name = "opentelemetry-exporter-jaeger-thrift"
@@ -965,13 +968,13 @@ thrift = ">=0.10.0"
[[package]]
name = "opentelemetry-instrumentation"
-version = "0.46b0"
+version = "0.48b0"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_instrumentation-0.46b0-py3-none-any.whl", hash = "sha256:89cd721b9c18c014ca848ccd11181e6b3fd3f6c7669e35d59c48dc527408c18b"},
- {file = "opentelemetry_instrumentation-0.46b0.tar.gz", hash = "sha256:974e0888fb2a1e01c38fbacc9483d024bb1132aad92d6d24e2e5543887a7adda"},
+ {file = "opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44"},
+ {file = "opentelemetry_instrumentation-0.48b0.tar.gz", hash = "sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35"},
]
[package.dependencies]
@@ -981,67 +984,68 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-celery"
-version = "0.46b0"
+version = "0.48b0"
description = "OpenTelemetry Celery Instrumentation"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_instrumentation_celery-0.46b0-py3-none-any.whl", hash = "sha256:431125dc56bced2ade779c15cf094b049d7dccdb1ff6fa59aea0c236443a71c0"},
- {file = "opentelemetry_instrumentation_celery-0.46b0.tar.gz", hash = "sha256:688b157c1fdddc48b4fdd029d35b4f3472cc2100b38914e3203b9bef9f15cd7f"},
+ {file = "opentelemetry_instrumentation_celery-0.48b0-py3-none-any.whl", hash = "sha256:c1904e38cc58fb2a33cd657d6e296285c5ffb0dca3f164762f94b905e5abc88e"},
+ {file = "opentelemetry_instrumentation_celery-0.48b0.tar.gz", hash = "sha256:1d33aa6c4a1e6c5d17a64215245208a96e56c9d07611685dbae09a557704af26"},
]
[package.dependencies]
opentelemetry-api = ">=1.12,<2.0"
-opentelemetry-instrumentation = "0.46b0"
-opentelemetry-semantic-conventions = "0.46b0"
+opentelemetry-instrumentation = "0.48b0"
+opentelemetry-semantic-conventions = "0.48b0"
[package.extras]
instruments = ["celery (>=4.0,<6.0)"]
[[package]]
name = "opentelemetry-instrumentation-logging"
-version = "0.46b0"
+version = "0.48b0"
description = "OpenTelemetry Logging instrumentation"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_instrumentation_logging-0.46b0-py3-none-any.whl", hash = "sha256:704e8ab4fe99202ad2178c6b1bac57a8954195ea6e5c08a24740623929ea8707"},
- {file = "opentelemetry_instrumentation_logging-0.46b0.tar.gz", hash = "sha256:dd8fd8a96255aa651c9b2c6dc91e3572383035015e4018a3b4d396ec9b08f0e0"},
+ {file = "opentelemetry_instrumentation_logging-0.48b0-py3-none-any.whl", hash = "sha256:75e5357d9b8c12071a19e1fef664dc1f430ef45874445c324ba4439a00972dc0"},
+ {file = "opentelemetry_instrumentation_logging-0.48b0.tar.gz", hash = "sha256:529eb13eedf57d6b2f94e20e996271db2957b817b9457fe4796365d6d4238dec"},
]
[package.dependencies]
opentelemetry-api = ">=1.12,<2.0"
-opentelemetry-instrumentation = "0.46b0"
+opentelemetry-instrumentation = "0.48b0"
[[package]]
name = "opentelemetry-sdk"
-version = "1.25.0"
+version = "1.27.0"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_sdk-1.25.0-py3-none-any.whl", hash = "sha256:d97ff7ec4b351692e9d5a15af570c693b8715ad78b8aafbec5c7100fe966b4c9"},
- {file = "opentelemetry_sdk-1.25.0.tar.gz", hash = "sha256:ce7fc319c57707ef5bf8b74fb9f8ebdb8bfafbe11898410e0d2a761d08a98ec7"},
+ {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"},
+ {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"},
]
[package.dependencies]
-opentelemetry-api = "1.25.0"
-opentelemetry-semantic-conventions = "0.46b0"
+opentelemetry-api = "1.27.0"
+opentelemetry-semantic-conventions = "0.48b0"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
-version = "0.46b0"
+version = "0.48b0"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_semantic_conventions-0.46b0-py3-none-any.whl", hash = "sha256:6daef4ef9fa51d51855d9f8e0ccd3a1bd59e0e545abe99ac6203804e36ab3e07"},
- {file = "opentelemetry_semantic_conventions-0.46b0.tar.gz", hash = "sha256:fbc982ecbb6a6e90869b15c1673be90bd18c8a56ff1cffc0864e38e2edffaefa"},
+ {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"},
+ {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"},
]
[package.dependencies]
-opentelemetry-api = "1.25.0"
+deprecated = ">=1.2.6"
+opentelemetry-api = "1.27.0"
[[package]]
name = "packaging"
@@ -1203,54 +1207,54 @@ files = [
[[package]]
name = "pydantic"
-version = "1.10.17"
+version = "1.10.18"
description = "Data validation and settings management using python type hints"
optional = false
python-versions = ">=3.7"
files = [
- {file = "pydantic-1.10.17-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fa51175313cc30097660b10eec8ca55ed08bfa07acbfe02f7a42f6c242e9a4b"},
- {file = "pydantic-1.10.17-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7e8988bb16988890c985bd2093df9dd731bfb9d5e0860db054c23034fab8f7a"},
- {file = "pydantic-1.10.17-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:371dcf1831f87c9e217e2b6a0c66842879a14873114ebb9d0861ab22e3b5bb1e"},
- {file = "pydantic-1.10.17-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4866a1579c0c3ca2c40575398a24d805d4db6cb353ee74df75ddeee3c657f9a7"},
- {file = "pydantic-1.10.17-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:543da3c6914795b37785703ffc74ba4d660418620cc273490d42c53949eeeca6"},
- {file = "pydantic-1.10.17-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7623b59876f49e61c2e283551cc3647616d2fbdc0b4d36d3d638aae8547ea681"},
- {file = "pydantic-1.10.17-cp310-cp310-win_amd64.whl", hash = "sha256:409b2b36d7d7d19cd8310b97a4ce6b1755ef8bd45b9a2ec5ec2b124db0a0d8f3"},
- {file = "pydantic-1.10.17-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fa43f362b46741df8f201bf3e7dff3569fa92069bcc7b4a740dea3602e27ab7a"},
- {file = "pydantic-1.10.17-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2a72d2a5ff86a3075ed81ca031eac86923d44bc5d42e719d585a8eb547bf0c9b"},
- {file = "pydantic-1.10.17-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4ad32aed3bf5eea5ca5decc3d1bbc3d0ec5d4fbcd72a03cdad849458decbc63"},
- {file = "pydantic-1.10.17-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aeb4e741782e236ee7dc1fb11ad94dc56aabaf02d21df0e79e0c21fe07c95741"},
- {file = "pydantic-1.10.17-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d2f89a719411cb234105735a520b7c077158a81e0fe1cb05a79c01fc5eb59d3c"},
- {file = "pydantic-1.10.17-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db3b48d9283d80a314f7a682f7acae8422386de659fffaba454b77a083c3937d"},
- {file = "pydantic-1.10.17-cp311-cp311-win_amd64.whl", hash = "sha256:9c803a5113cfab7bbb912f75faa4fc1e4acff43e452c82560349fff64f852e1b"},
- {file = "pydantic-1.10.17-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:820ae12a390c9cbb26bb44913c87fa2ff431a029a785642c1ff11fed0a095fcb"},
- {file = "pydantic-1.10.17-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c1e51d1af306641b7d1574d6d3307eaa10a4991542ca324f0feb134fee259815"},
- {file = "pydantic-1.10.17-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e53fb834aae96e7b0dadd6e92c66e7dd9cdf08965340ed04c16813102a47fab"},
- {file = "pydantic-1.10.17-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e2495309b1266e81d259a570dd199916ff34f7f51f1b549a0d37a6d9b17b4dc"},
- {file = "pydantic-1.10.17-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:098ad8de840c92ea586bf8efd9e2e90c6339d33ab5c1cfbb85be66e4ecf8213f"},
- {file = "pydantic-1.10.17-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:525bbef620dac93c430d5d6bdbc91bdb5521698d434adf4434a7ef6ffd5c4b7f"},
- {file = "pydantic-1.10.17-cp312-cp312-win_amd64.whl", hash = "sha256:6654028d1144df451e1da69a670083c27117d493f16cf83da81e1e50edce72ad"},
- {file = "pydantic-1.10.17-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c87cedb4680d1614f1d59d13fea353faf3afd41ba5c906a266f3f2e8c245d655"},
- {file = "pydantic-1.10.17-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11289fa895bcbc8f18704efa1d8020bb9a86314da435348f59745473eb042e6b"},
- {file = "pydantic-1.10.17-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:94833612d6fd18b57c359a127cbfd932d9150c1b72fea7c86ab58c2a77edd7c7"},
- {file = "pydantic-1.10.17-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d4ecb515fa7cb0e46e163ecd9d52f9147ba57bc3633dca0e586cdb7a232db9e3"},
- {file = "pydantic-1.10.17-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7017971ffa7fd7808146880aa41b266e06c1e6e12261768a28b8b41ba55c8076"},
- {file = "pydantic-1.10.17-cp37-cp37m-win_amd64.whl", hash = "sha256:e840e6b2026920fc3f250ea8ebfdedf6ea7a25b77bf04c6576178e681942ae0f"},
- {file = "pydantic-1.10.17-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bfbb18b616abc4df70591b8c1ff1b3eabd234ddcddb86b7cac82657ab9017e33"},
- {file = "pydantic-1.10.17-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebb249096d873593e014535ab07145498957091aa6ae92759a32d40cb9998e2e"},
- {file = "pydantic-1.10.17-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8c209af63ccd7b22fba94b9024e8b7fd07feffee0001efae50dd99316b27768"},
- {file = "pydantic-1.10.17-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4b40c9e13a0b61583e5599e7950490c700297b4a375b55b2b592774332798b7"},
- {file = "pydantic-1.10.17-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c31d281c7485223caf6474fc2b7cf21456289dbaa31401844069b77160cab9c7"},
- {file = "pydantic-1.10.17-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae5184e99a060a5c80010a2d53c99aee76a3b0ad683d493e5f0620b5d86eeb75"},
- {file = "pydantic-1.10.17-cp38-cp38-win_amd64.whl", hash = "sha256:ad1e33dc6b9787a6f0f3fd132859aa75626528b49cc1f9e429cdacb2608ad5f0"},
- {file = "pydantic-1.10.17-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17c0ee7192e54a10943f245dc79e36d9fe282418ea05b886e1c666063a7b54"},
- {file = "pydantic-1.10.17-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cafb9c938f61d1b182dfc7d44a7021326547b7b9cf695db5b68ec7b590214773"},
- {file = "pydantic-1.10.17-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95ef534e3c22e5abbdbdd6f66b6ea9dac3ca3e34c5c632894f8625d13d084cbe"},
- {file = "pydantic-1.10.17-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d96b8799ae3d782df7ec9615cb59fc32c32e1ed6afa1b231b0595f6516e8ab"},
- {file = "pydantic-1.10.17-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ab2f976336808fd5d539fdc26eb51f9aafc1f4b638e212ef6b6f05e753c8011d"},
- {file = "pydantic-1.10.17-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8ad363330557beac73159acfbeed220d5f1bfcd6b930302a987a375e02f74fd"},
- {file = "pydantic-1.10.17-cp39-cp39-win_amd64.whl", hash = "sha256:48db882e48575ce4b39659558b2f9f37c25b8d348e37a2b4e32971dd5a7d6227"},
- {file = "pydantic-1.10.17-py3-none-any.whl", hash = "sha256:e41b5b973e5c64f674b3b4720286ded184dcc26a691dd55f34391c62c6934688"},
- {file = "pydantic-1.10.17.tar.gz", hash = "sha256:f434160fb14b353caf634149baaf847206406471ba70e64657c1e8330277a991"},
+ {file = "pydantic-1.10.18-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e405ffcc1254d76bb0e760db101ee8916b620893e6edfbfee563b3c6f7a67c02"},
+ {file = "pydantic-1.10.18-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e306e280ebebc65040034bff1a0a81fd86b2f4f05daac0131f29541cafd80b80"},
+ {file = "pydantic-1.10.18-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11d9d9b87b50338b1b7de4ebf34fd29fdb0d219dc07ade29effc74d3d2609c62"},
+ {file = "pydantic-1.10.18-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b661ce52c7b5e5f600c0c3c5839e71918346af2ef20062705ae76b5c16914cab"},
+ {file = "pydantic-1.10.18-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c20f682defc9ef81cd7eaa485879ab29a86a0ba58acf669a78ed868e72bb89e0"},
+ {file = "pydantic-1.10.18-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c5ae6b7c8483b1e0bf59e5f1843e4fd8fd405e11df7de217ee65b98eb5462861"},
+ {file = "pydantic-1.10.18-cp310-cp310-win_amd64.whl", hash = "sha256:74fe19dda960b193b0eb82c1f4d2c8e5e26918d9cda858cbf3f41dd28549cb70"},
+ {file = "pydantic-1.10.18-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:72fa46abace0a7743cc697dbb830a41ee84c9db8456e8d77a46d79b537efd7ec"},
+ {file = "pydantic-1.10.18-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ef0fe7ad7cbdb5f372463d42e6ed4ca9c443a52ce544472d8842a0576d830da5"},
+ {file = "pydantic-1.10.18-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a00e63104346145389b8e8f500bc6a241e729feaf0559b88b8aa513dd2065481"},
+ {file = "pydantic-1.10.18-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae6fa2008e1443c46b7b3a5eb03800121868d5ab6bc7cda20b5df3e133cde8b3"},
+ {file = "pydantic-1.10.18-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9f463abafdc92635da4b38807f5b9972276be7c8c5121989768549fceb8d2588"},
+ {file = "pydantic-1.10.18-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3445426da503c7e40baccefb2b2989a0c5ce6b163679dd75f55493b460f05a8f"},
+ {file = "pydantic-1.10.18-cp311-cp311-win_amd64.whl", hash = "sha256:467a14ee2183bc9c902579bb2f04c3d3dac00eff52e252850509a562255b2a33"},
+ {file = "pydantic-1.10.18-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:efbc8a7f9cb5fe26122acba1852d8dcd1e125e723727c59dcd244da7bdaa54f2"},
+ {file = "pydantic-1.10.18-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24a4a159d0f7a8e26bf6463b0d3d60871d6a52eac5bb6a07a7df85c806f4c048"},
+ {file = "pydantic-1.10.18-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b74be007703547dc52e3c37344d130a7bfacca7df112a9e5ceeb840a9ce195c7"},
+ {file = "pydantic-1.10.18-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcb20d4cb355195c75000a49bb4a31d75e4295200df620f454bbc6bdf60ca890"},
+ {file = "pydantic-1.10.18-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:46f379b8cb8a3585e3f61bf9ae7d606c70d133943f339d38b76e041ec234953f"},
+ {file = "pydantic-1.10.18-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cbfbca662ed3729204090c4d09ee4beeecc1a7ecba5a159a94b5a4eb24e3759a"},
+ {file = "pydantic-1.10.18-cp312-cp312-win_amd64.whl", hash = "sha256:c6d0a9f9eccaf7f438671a64acf654ef0d045466e63f9f68a579e2383b63f357"},
+ {file = "pydantic-1.10.18-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3d5492dbf953d7d849751917e3b2433fb26010d977aa7a0765c37425a4026ff1"},
+ {file = "pydantic-1.10.18-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe734914977eed33033b70bfc097e1baaffb589517863955430bf2e0846ac30f"},
+ {file = "pydantic-1.10.18-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15fdbe568beaca9aacfccd5ceadfb5f1a235087a127e8af5e48df9d8a45ae85c"},
+ {file = "pydantic-1.10.18-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c3e742f62198c9eb9201781fbebe64533a3bbf6a76a91b8d438d62b813079dbc"},
+ {file = "pydantic-1.10.18-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:19a3bd00b9dafc2cd7250d94d5b578edf7a0bd7daf102617153ff9a8fa37871c"},
+ {file = "pydantic-1.10.18-cp37-cp37m-win_amd64.whl", hash = "sha256:2ce3fcf75b2bae99aa31bd4968de0474ebe8c8258a0110903478bd83dfee4e3b"},
+ {file = "pydantic-1.10.18-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:335a32d72c51a313b33fa3a9b0fe283503272ef6467910338e123f90925f0f03"},
+ {file = "pydantic-1.10.18-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:34a3613c7edb8c6fa578e58e9abe3c0f5e7430e0fc34a65a415a1683b9c32d9a"},
+ {file = "pydantic-1.10.18-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9ee4e6ca1d9616797fa2e9c0bfb8815912c7d67aca96f77428e316741082a1b"},
+ {file = "pydantic-1.10.18-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23e8ec1ce4e57b4f441fc91e3c12adba023fedd06868445a5b5f1d48f0ab3682"},
+ {file = "pydantic-1.10.18-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:44ae8a3e35a54d2e8fa88ed65e1b08967a9ef8c320819a969bfa09ce5528fafe"},
+ {file = "pydantic-1.10.18-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5389eb3b48a72da28c6e061a247ab224381435256eb541e175798483368fdd3"},
+ {file = "pydantic-1.10.18-cp38-cp38-win_amd64.whl", hash = "sha256:069b9c9fc645474d5ea3653788b544a9e0ccd3dca3ad8c900c4c6eac844b4620"},
+ {file = "pydantic-1.10.18-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:80b982d42515632eb51f60fa1d217dfe0729f008e81a82d1544cc392e0a50ddf"},
+ {file = "pydantic-1.10.18-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:aad8771ec8dbf9139b01b56f66386537c6fe4e76c8f7a47c10261b69ad25c2c9"},
+ {file = "pydantic-1.10.18-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941a2eb0a1509bd7f31e355912eb33b698eb0051730b2eaf9e70e2e1589cae1d"},
+ {file = "pydantic-1.10.18-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65f7361a09b07915a98efd17fdec23103307a54db2000bb92095457ca758d485"},
+ {file = "pydantic-1.10.18-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6951f3f47cb5ca4da536ab161ac0163cab31417d20c54c6de5ddcab8bc813c3f"},
+ {file = "pydantic-1.10.18-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7a4c5eec138a9b52c67f664c7d51d4c7234c5ad65dd8aacd919fb47445a62c86"},
+ {file = "pydantic-1.10.18-cp39-cp39-win_amd64.whl", hash = "sha256:49e26c51ca854286bffc22b69787a8d4063a62bf7d83dc21d44d2ff426108518"},
+ {file = "pydantic-1.10.18-py3-none-any.whl", hash = "sha256:06a189b81ffc52746ec9c8c007f16e5167c8b0a696e1a726369327e3db7b2a82"},
+ {file = "pydantic-1.10.18.tar.gz", hash = "sha256:baebdff1907d1d96a139c25136a9bb7d17e118f133a76a2ef3b845e831e3403a"},
]
[package.dependencies]
@@ -1441,13 +1445,13 @@ resolved_reference = "4891556e7db831a5a9b27d4bad8ff102609b2a2c"
[[package]]
name = "pytest"
-version = "8.3.1"
+version = "8.3.3"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pytest-8.3.1-py3-none-any.whl", hash = "sha256:e9600ccf4f563976e2c99fa02c7624ab938296551f280835ee6516df8bc4ae8c"},
- {file = "pytest-8.3.1.tar.gz", hash = "sha256:7e8e5c5abd6e93cb1cc151f23e57adc31fcf8cfd2a3ff2da63e23f732de35db6"},
+ {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"},
+ {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"},
]
[package.dependencies]
@@ -1520,62 +1524,64 @@ files = [
[[package]]
name = "pyyaml"
-version = "6.0.1"
+version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
files = [
- {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
- {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
- {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
- {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
- {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
- {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
- {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
- {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
- {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
- {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
- {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
- {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
- {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
- {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
- {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
- {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
- {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
- {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
- {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
- {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
- {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
- {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
- {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
- {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
- {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
- {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
- {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
- {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
- {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
- {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
- {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
- {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
- {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
- {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
- {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
- {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
- {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
- {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
- {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
- {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
- {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
- {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
- {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
- {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
- {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
- {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
- {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
- {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
- {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
+ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
+ {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
+ {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
+ {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
+ {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
+ {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
+ {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
+ {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
+ {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
+ {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
+ {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
+ {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
+ {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
+ {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
+ {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
+ {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
+ {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
+ {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
+ {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
+ {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
+ {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
+ {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
+ {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
+ {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
+ {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
+ {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
+ {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
+ {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
+ {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
+ {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
+ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
]
[[package]]
@@ -2095,13 +2101,13 @@ six = "*"
[[package]]
name = "urllib3"
-version = "1.26.19"
+version = "1.26.20"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
- {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"},
- {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"},
+ {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"},
+ {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"},
]
[package.extras]
@@ -2311,4 +2317,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools",
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
-content-hash = "fc3e4ca2984cf3f3652de0a6167bccae08a5efe76292845c01e9da8e64a6a977"
+content-hash = "0d96427eac02294dfb7ce6770111c88343202c9342ad3a9d807aeec52e7be425"
diff --git a/pyproject.toml b/pyproject.toml
index 6fcd652fc..94ce5f9a0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "splunk-connect-for-snmp"
-version = "1.11.0"
+version = "1.12.0-beta.6"
description = ""
authors = ["omrozowicz-splunk "]
license = "Apache-2.0"
@@ -12,6 +12,14 @@ traps = 'splunk_connect_for_snmp.traps:main'
inventory-loader = 'splunk_connect_for_snmp.inventory.loader:load'
run-walk = 'splunk_connect_for_snmp.walk:run_walk'
+[tool.pytest.ini_options]
+log_cli = true
+log_cli_level = "DEBUG"
+log_cli_format = "%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)"
+log_cli_date_format = "%Y-%m-%d %H:%M:%S"
+testpaths = ["test"]
+python_files = ["test_*.py"]
+
[tool.poetry.dependencies]
python = "^3.8"
pymongo = {extras = ["srv"], version = "^4.0.0"}
@@ -26,7 +34,7 @@ opentelemetry-exporter-jaeger-thrift = "^1.10.0"
pyrate-limiter = "^2.10.0"
requests-cache = "^1.0.0"
requests-ratelimiter = "^0.7.0"
-mongoengine = "^0.28.0"
+mongoengine = "^0.29.0"
celery-redbeat = {git = "https://github.com/splunk/redbeat", branch = "main"}
PyYAML = "^6.0"
#Note this is temporary PR to upstream project is issued
@@ -59,10 +67,3 @@ build-backend = "poetry.masonry.api"
#docker run --rm -d -p 27017:27017 --name example-mongo mongo:latest
#docker run --rm -d -p 5672:5672 --hostname my-rabbit --name some-rabbit rabbitmq:3
-[tool.pytest.ini_options]
-log_cli = true
-log_cli_level = "DEBUG"
-log_cli_format = "%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)"
-log_cli_date_format = "%Y-%m-%d %H:%M:%S"
-testpaths = ["test"]
-python_files = ["test_*.py"]
diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml
index e405f5f39..2126ed8ad 100644
--- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml
+++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml
@@ -56,6 +56,8 @@ spec:
value: "http://release-name-mibserver/standard.txt"
- name: LOG_LEVEL
value: INFO
+ - name: PYSNMP_DEBUG
+ value: ""
- name: SPLUNK_HEC_SCHEME
value: "https"
- name: SPLUNK_HEC_HOST
@@ -71,6 +73,8 @@ spec:
secretKeyRef:
name: splunk-connect-for-snmp-splunk
key: hec_token
+ - name: IPv6_ENABLED
+ value: "false"
ports:
- name: snmp-udp
containerPort: 2162
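Note on the rendered manifests that follow: the new `PYSNMP_DEBUG` variable is rendered empty, which leaves pysnmp debug logging off. As a sketch of how it is meant to be used (the category names come from pysnmp's debug module and should be checked against the version in use), an enabled rendering would carry something like:

```yaml
# illustrative only: comma-separated pysnmp debug categories
- name: PYSNMP_DEBUG
  value: "msgproc,secmod"
```

`IPv6_ENABLED` follows the same pattern: it is rendered as `"false"` here and presumably switches to `"true"` when IPv6 support is turned on via the new `enable-ipv6` documentation pages added to the nav above.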
diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/service.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/service.yaml
index d65075ff0..399b57e52 100644
--- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/service.yaml
+++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/service.yaml
@@ -12,11 +12,14 @@ metadata:
app.kubernetes.io/managed-by: Helm
annotations:
metallb.universe.tf/allow-shared-ip: "splunk-connect"
+ metallb.universe.tf/loadBalancerIPs: 10.202.6.213
spec:
type: LoadBalancer
externalTrafficPolicy: Local
- loadBalancerIP: 10.202.6.213
+ ipFamilyPolicy: SingleStack
+ ipFamilies:
+ - IPv4
ports:
- port: 162
targetPort: 2162
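The trap Service change repeats in each rendered variant below: the deprecated `spec.loadBalancerIP` field is replaced by the `metallb.universe.tf/loadBalancerIPs` annotation, and the address family is pinned explicitly with `ipFamilyPolicy`/`ipFamilies`. For comparison, a dual-stack rendering (a sketch only, assuming the chart exposes this once IPv6 is enabled; the fields themselves are standard Kubernetes ServiceSpec fields) would look like:

```yaml
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local
  # request both families; use PreferDualStack to fall back gracefully
  ipFamilyPolicy: RequireDualStack
  ipFamilies:
    - IPv4
    - IPv6
```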
diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
index f08f466db..5a24aa081 100644
--- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
+++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
index 911621cba..17c212667 100644
--- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
+++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
index 4b03ed67b..9b6b4a5f5 100644
--- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
+++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml
index 213f7f6bb..75437ab27 100644
--- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml
+++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml
@@ -55,6 +55,8 @@ spec:
value: "http://release-name-mibserver/standard.txt"
- name: LOG_LEVEL
value: INFO
+ - name: PYSNMP_DEBUG
+ value: ""
- name: SPLUNK_HEC_SCHEME
value: "https"
- name: SPLUNK_HEC_HOST
@@ -70,6 +72,8 @@ spec:
secretKeyRef:
name: splunk-connect-for-snmp-splunk
key: hec_token
+ - name: IPv6_ENABLED
+ value: "false"
ports:
- name: snmp-udp
containerPort: 2162
diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/service.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/service.yaml
index d65075ff0..399b57e52 100644
--- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/service.yaml
+++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/service.yaml
@@ -12,11 +12,14 @@ metadata:
app.kubernetes.io/managed-by: Helm
annotations:
metallb.universe.tf/allow-shared-ip: "splunk-connect"
+ metallb.universe.tf/loadBalancerIPs: 10.202.6.213
spec:
type: LoadBalancer
externalTrafficPolicy: Local
- loadBalancerIP: 10.202.6.213
+ ipFamilyPolicy: SingleStack
+ ipFamilies:
+ - IPv4
ports:
- port: 162
targetPort: 2162
diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
index 8f4f7c98d..a790af741 100644
--- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
+++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
@@ -65,6 +65,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
index dc9413250..1dd219cf6 100644
--- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
+++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
@@ -65,6 +65,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
index 058bde528..5660f516c 100644
--- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
+++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
@@ -65,6 +65,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/deployment.yaml
index 213f7f6bb..75437ab27 100644
--- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/deployment.yaml
+++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/deployment.yaml
@@ -55,6 +55,8 @@ spec:
value: "http://release-name-mibserver/standard.txt"
- name: LOG_LEVEL
value: INFO
+ - name: PYSNMP_DEBUG
+ value: ""
- name: SPLUNK_HEC_SCHEME
value: "https"
- name: SPLUNK_HEC_HOST
@@ -70,6 +72,8 @@ spec:
secretKeyRef:
name: splunk-connect-for-snmp-splunk
key: hec_token
+ - name: IPv6_ENABLED
+ value: "false"
ports:
- name: snmp-udp
containerPort: 2162
diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/service.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/service.yaml
index d65075ff0..399b57e52 100644
--- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/service.yaml
+++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/service.yaml
@@ -12,11 +12,14 @@ metadata:
app.kubernetes.io/managed-by: Helm
annotations:
metallb.universe.tf/allow-shared-ip: "splunk-connect"
+ metallb.universe.tf/loadBalancerIPs: 10.202.6.213
spec:
type: LoadBalancer
externalTrafficPolicy: Local
- loadBalancerIP: 10.202.6.213
+ ipFamilyPolicy: SingleStack
+ ipFamilies:
+ - IPv4
ports:
- port: 162
targetPort: 2162
diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
index 8f4f7c98d..a790af741 100644
--- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
+++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
@@ -65,6 +65,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
index dc9413250..1dd219cf6 100644
--- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
+++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
@@ -65,6 +65,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
index 058bde528..5660f516c 100644
--- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
+++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
@@ -65,6 +65,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml
index e405f5f39..2126ed8ad 100644
--- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml
+++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml
@@ -56,6 +56,8 @@ spec:
value: "http://release-name-mibserver/standard.txt"
- name: LOG_LEVEL
value: INFO
+ - name: PYSNMP_DEBUG
+ value: ""
- name: SPLUNK_HEC_SCHEME
value: "https"
- name: SPLUNK_HEC_HOST
@@ -71,6 +73,8 @@ spec:
secretKeyRef:
name: splunk-connect-for-snmp-splunk
key: hec_token
+ - name: IPv6_ENABLED
+ value: "false"
ports:
- name: snmp-udp
containerPort: 2162
diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/service.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/service.yaml
index d65075ff0..399b57e52 100644
--- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/service.yaml
+++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/service.yaml
@@ -12,11 +12,14 @@ metadata:
app.kubernetes.io/managed-by: Helm
annotations:
metallb.universe.tf/allow-shared-ip: "splunk-connect"
+ metallb.universe.tf/loadBalancerIPs: 10.202.6.213
spec:
type: LoadBalancer
externalTrafficPolicy: Local
- loadBalancerIP: 10.202.6.213
+ ipFamilyPolicy: SingleStack
+ ipFamilies:
+ - IPv4
ports:
- port: 162
targetPort: 2162
diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
index f08f466db..5a24aa081 100644
--- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
+++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
index 911621cba..17c212667 100644
--- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
+++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
index 4b03ed67b..9b6b4a5f5 100644
--- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
+++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
index f08f466db..5a24aa081 100644
--- a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
+++ b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
index 911621cba..17c212667 100644
--- a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
+++ b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/deployment.yaml
index e405f5f39..2126ed8ad 100644
--- a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/deployment.yaml
+++ b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/deployment.yaml
@@ -56,6 +56,8 @@ spec:
value: "http://release-name-mibserver/standard.txt"
- name: LOG_LEVEL
value: INFO
+ - name: PYSNMP_DEBUG
+ value: ""
- name: SPLUNK_HEC_SCHEME
value: "https"
- name: SPLUNK_HEC_HOST
@@ -71,6 +73,8 @@ spec:
secretKeyRef:
name: splunk-connect-for-snmp-splunk
key: hec_token
+ - name: IPv6_ENABLED
+ value: "false"
ports:
- name: snmp-udp
containerPort: 2162
diff --git a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/service.yaml b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/service.yaml
index d65075ff0..399b57e52 100644
--- a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/service.yaml
+++ b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/service.yaml
@@ -12,11 +12,14 @@ metadata:
app.kubernetes.io/managed-by: Helm
annotations:
metallb.universe.tf/allow-shared-ip: "splunk-connect"
+ metallb.universe.tf/loadBalancerIPs: 10.202.6.213
spec:
type: LoadBalancer
externalTrafficPolicy: Local
- loadBalancerIP: 10.202.6.213
+ ipFamilyPolicy: SingleStack
+ ipFamilies:
+ - IPv4
ports:
- port: 162
targetPort: 2162
diff --git a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
index 911621cba..17c212667 100644
--- a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
+++ b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
index 4b03ed67b..9b6b4a5f5 100644
--- a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
+++ b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml
index e405f5f39..2126ed8ad 100644
--- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml
+++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml
@@ -56,6 +56,8 @@ spec:
value: "http://release-name-mibserver/standard.txt"
- name: LOG_LEVEL
value: INFO
+ - name: PYSNMP_DEBUG
+ value: ""
- name: SPLUNK_HEC_SCHEME
value: "https"
- name: SPLUNK_HEC_HOST
@@ -71,6 +73,8 @@ spec:
secretKeyRef:
name: splunk-connect-for-snmp-splunk
key: hec_token
+ - name: IPv6_ENABLED
+ value: "false"
ports:
- name: snmp-udp
containerPort: 2162
diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/service.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/service.yaml
index d65075ff0..399b57e52 100644
--- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/service.yaml
+++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/service.yaml
@@ -12,11 +12,14 @@ metadata:
app.kubernetes.io/managed-by: Helm
annotations:
metallb.universe.tf/allow-shared-ip: "splunk-connect"
+ metallb.universe.tf/loadBalancerIPs: 10.202.6.213
spec:
type: LoadBalancer
externalTrafficPolicy: Local
- loadBalancerIP: 10.202.6.213
+ ipFamilyPolicy: SingleStack
+ ipFamilies:
+ - IPv4
ports:
- port: 162
targetPort: 2162
diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
index 270c1ce9f..af48a9368 100644
--- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
+++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
index f64e6b4ed..5118bb8a9 100644
--- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
+++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
index cef9e439e..37c3334f7 100644
--- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
+++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml
@@ -66,6 +66,8 @@ spec:
value: "3"
- name: MAX_OID_TO_PROCESS
value: "70"
+ - name: PYSNMP_DEBUG
+ value: ""
- name: PROFILES_RELOAD_DELAY
value: "60"
- name: MIB_SOURCES
diff --git a/splunk_connect_for_snmp/__init__.py b/splunk_connect_for_snmp/__init__.py
index bb68a333d..9482bb002 100644
--- a/splunk_connect_for_snmp/__init__.py
+++ b/splunk_connect_for_snmp/__init__.py
@@ -15,4 +15,4 @@
#
-__version__ = "1.11.0"
+__version__ = "1.12.0-beta.6"
diff --git a/splunk_connect_for_snmp/common/inventory_processor.py b/splunk_connect_for_snmp/common/inventory_processor.py
index 5e8c3a4c2..0a87cb599 100644
--- a/splunk_connect_for_snmp/common/inventory_processor.py
+++ b/splunk_connect_for_snmp/common/inventory_processor.py
@@ -33,7 +33,7 @@
def transform_key_to_address(target):
if ":" in target:
- address, port = target.split(":")
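+ # split on the last ":" only so IPv6 literals survive, e.g.
+ # "2001:db8::1:161" -> ("2001:db8::1", "161")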
+ address, port = target.rsplit(":", 1)
else:
return target, 161
return address, int(port)
@@ -41,7 +41,7 @@ def transform_key_to_address(target):
def transform_address_to_key(address, port):
if not port or int(port) == 161:
- return address
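+ # materialize the default port so every inventory key has the unambiguous
+ # "<address>:<port>" form that transform_key_to_address relies on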
+ return f"{address}:161"
else:
return f"{address}:{port}"
diff --git a/splunk_connect_for_snmp/snmp/auth.py b/splunk_connect_for_snmp/snmp/auth.py
index f53e62f55..bb762d4fb 100644
--- a/splunk_connect_for_snmp/snmp/auth.py
+++ b/splunk_connect_for_snmp/snmp/auth.py
@@ -20,6 +20,7 @@
CommunityData,
ContextData,
SnmpEngine,
+ Udp6TransportTarget,
UdpTransportTarget,
UsmUserData,
getCmd,
@@ -54,9 +55,7 @@ def get_secret_value(
def get_security_engine_id(logger, ir: InventoryRecord, snmp_engine: SnmpEngine):
observer_context: Dict[Any, Any] = {}
- transport_target = UdpTransportTarget(
- (ir.address, ir.port), timeout=UDP_CONNECTION_TIMEOUT
- )
+ transport_target = setup_transport_target(ir)
# Register a callback to be invoked at specified execution point of
# SNMP Engine and passed local variables at execution point's local scope
@@ -87,6 +86,18 @@ def get_security_engine_id(logger, ir: InventoryRecord, snmp_engine: SnmpEngine)
return security_engine_id
+def setup_transport_target(ir):
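+ # a ":" can only appear in an IPv6 literal, so its presence selects the
+ # UDP-over-IPv6 transport; anything else uses plain UDP/IPv4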
+ if ":" in ir.address:
+ transport = Udp6TransportTarget(
+ (ir.address, ir.port), timeout=UDP_CONNECTION_TIMEOUT
+ )
+ else:
+ transport = UdpTransportTarget(
+ (ir.address, ir.port), timeout=UDP_CONNECTION_TIMEOUT
+ )
+ return transport
+
+
def fetch_security_engine_id(observer_context, error_indication, ipaddress):
if "securityEngineId" in observer_context:
return observer_context["securityEngineId"]
diff --git a/splunk_connect_for_snmp/snmp/manager.py b/splunk_connect_for_snmp/snmp/manager.py
index 32c8756d7..3017c8237 100644
--- a/splunk_connect_for_snmp/snmp/manager.py
+++ b/splunk_connect_for_snmp/snmp/manager.py
@@ -38,7 +38,7 @@
import pymongo
from celery import Task
from celery.utils.log import get_task_logger
-from pysnmp.hlapi import SnmpEngine, UdpTransportTarget, bulkCmd, getCmd
+from pysnmp.hlapi import SnmpEngine, bulkCmd, getCmd
from pysnmp.smi import compiler, view
from pysnmp.smi.rfc1902 import ObjectIdentity, ObjectType
from requests_cache import MongoCache
@@ -46,7 +46,7 @@
from splunk_connect_for_snmp.common.hummanbool import human_bool
from splunk_connect_for_snmp.common.inventory_record import InventoryRecord
from splunk_connect_for_snmp.common.requests import CachedLimiterSession
-from splunk_connect_for_snmp.snmp.auth import get_auth
+from splunk_connect_for_snmp.snmp.auth import get_auth, setup_transport_target
from splunk_connect_for_snmp.snmp.context import get_context_data
from splunk_connect_for_snmp.snmp.exceptions import SnmpActionError
@@ -63,6 +63,7 @@
PROFILES_RELOAD_DELAY = int(os.getenv("PROFILES_RELOAD_DELAY", "60"))
UDP_CONNECTION_TIMEOUT = int(os.getenv("UDP_CONNECTION_TIMEOUT", 3))
MAX_OID_TO_PROCESS = int(os.getenv("MAX_OID_TO_PROCESS", 70))
+PYSNMP_DEBUG = os.getenv("PYSNMP_DEBUG", "")
DEFAULT_STANDARD_MIBS = [
"HOST-RESOURCES-MIB",
@@ -72,12 +73,33 @@
"TCP-MIB",
"UDP-MIB",
]
+
logger = get_task_logger(__name__)
+if PYSNMP_DEBUG:
+ # Usage: PYSNMP_DEBUG=dsp,msgproc,io
+
+ # List of available debug flags:
+ # io, dsp, msgproc, secmod, mibbuild, mibview, mibinstrum, acl, proxy, app, all
+
+ from pysnmp import debug
+
+ debug_flags = list(debug.flagMap.keys())
+ enabled_debug_flags = [
+ debug_flag.strip()
+ for debug_flag in PYSNMP_DEBUG.split(",")
+ if debug_flag.strip() in debug_flags
+ ]
+
+ if enabled_debug_flags:
+ debug.setLogger(
+ debug.Debug(*enabled_debug_flags, options={"loggerName": logger})
+ )
+
def return_address_and_port(target):
if ":" in target:
- address_tuple = target.split(":")
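+ # use rsplit so only the final ":" separates the port from an IPv6 address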
+ address_tuple = target.rsplit(":", 1)
return address_tuple[0], int(address_tuple[1])
else:
return target, 161
@@ -294,7 +316,7 @@ def do_work(
self,
ir: InventoryRecord,
walk: bool = False,
- profiles: List[str] = None,
+ profiles: Union[List[str], None] = None,
):
retry = False
address = transform_address_to_key(ir.address, ir.port)
@@ -313,9 +335,7 @@ def do_work(
auth_data = get_auth(logger, ir, self.snmpEngine)
context_data = get_context_data()
- transport = UdpTransportTarget(
- (ir.address, ir.port), timeout=UDP_CONNECTION_TIMEOUT
- )
+ transport = setup_transport_target(ir)
metrics: Dict[str, Any] = {}
if not varbinds_get and not varbinds_bulk:
@@ -323,21 +343,60 @@ def do_work(
return False, {}
if varbinds_bulk:
+ self.run_bulk_request(
+ address,
+ auth_data,
+ bulk_mapping,
+ context_data,
+ ir,
+ metrics,
+ transport,
+ varbinds_bulk,
+ walk,
+ )
+
+ if varbinds_get:
+ self.run_get_request(
+ address,
+ auth_data,
+ context_data,
+ get_mapping,
+ ir,
+ metrics,
+ transport,
+ varbinds_get,
+ walk,
+ )
+
+ for group_key, metric in metrics.items():
+ if "profiles" in metrics[group_key]:
+ metrics[group_key]["profiles"] = ",".join(
+ metrics[group_key]["profiles"]
+ )
+
+ return retry, metrics
+
+ def run_get_request(
+ self,
+ address,
+ auth_data,
+ context_data,
+ get_mapping,
+ ir,
+ metrics,
+ transport,
+ varbinds_get,
+ walk,
+ ):
+ # some devices cannot process more than MAX_OID_TO_PROCESS OIDs at once, so the request is split into chunks
+ for varbind_chunk in self.get_varbind_chunk(varbinds_get, MAX_OID_TO_PROCESS):
for (
error_indication,
error_status,
error_index,
varbind_table,
- ) in bulkCmd(
- self.snmpEngine,
- auth_data,
- transport,
- context_data,
- 1,
- 10,
- *varbinds_bulk,
- lexicographicMode=False,
- ignoreNonIncreasingOid=is_increasing_oids_ignored(ir.address, ir.port),
+ ) in getCmd(
+ self.snmpEngine, auth_data, transport, context_data, *varbind_chunk
):
if not _any_failure_happened(
error_indication,
@@ -347,47 +406,52 @@ def do_work(
ir.address,
walk,
):
- _, tmp_mibs, _ = self.process_snmp_data(
- varbind_table, metrics, address, bulk_mapping
- )
- if tmp_mibs:
- self.load_mibs(tmp_mibs)
- self.process_snmp_data(
- varbind_table, metrics, address, bulk_mapping
- )
+ self.process_snmp_data(varbind_table, metrics, address, get_mapping)
- if varbinds_get:
- # some devices cannot process more OID than X, so it is necessary to divide it on chunks
- for varbind_chunk in self.get_varbind_chunk(
- varbinds_get, MAX_OID_TO_PROCESS
+ def run_bulk_request(
+ self,
+ address,
+ auth_data,
+ bulk_mapping,
+ context_data,
+ ir,
+ metrics,
+ transport,
+ varbinds_bulk,
+ walk,
+ ):
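+ # iterate GETBULK responses (non-repeaters=1, max-repetitions=10);
+ # lexicographicMode=False keeps the walk inside the requested subtrees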
+ for (
+ error_indication,
+ error_status,
+ error_index,
+ varbind_table,
+ ) in bulkCmd(
+ self.snmpEngine,
+ auth_data,
+ transport,
+ context_data,
+ 1,
+ 10,
+ *varbinds_bulk,
+ lexicographicMode=False,
+ ignoreNonIncreasingOid=is_increasing_oids_ignored(ir.address, ir.port),
+ ):
+ if not _any_failure_happened(
+ error_indication,
+ error_status,
+ error_index,
+ varbind_table,
+ ir.address,
+ walk,
):
- for (
- error_indication,
- error_status,
- error_index,
- varbind_table,
- ) in getCmd(
- self.snmpEngine, auth_data, transport, context_data, *varbind_chunk
- ):
- if not _any_failure_happened(
- error_indication,
- error_status,
- error_index,
- varbind_table,
- ir.address,
- walk,
- ):
- self.process_snmp_data(
- varbind_table, metrics, address, get_mapping
- )
-
- for group_key, metric in metrics.items():
- if "profiles" in metrics[group_key]:
- metrics[group_key]["profiles"] = ",".join(
- metrics[group_key]["profiles"]
+ _, tmp_mibs, _ = self.process_snmp_data(
+ varbind_table, metrics, address, bulk_mapping
)
-
- return retry, metrics
+ if tmp_mibs:
+ self.load_mibs(tmp_mibs)
+ self.process_snmp_data(
+ varbind_table, metrics, address, bulk_mapping
+ )
def get_varbind_chunk(self, lst, n):
for i in range(0, len(lst), n):
@@ -452,80 +516,119 @@ def process_snmp_data(self, varbind_table, metrics, target, mapping={}):
retry = False
remotemibs = []
for varbind in varbind_table:
- mib, metric, index = varbind[0].getMibSymbol()
-
- varbind_id = varbind[0].prettyPrint()
- oid = str(varbind[0].getOid())
+ index, metric, mib, oid, varbind_id = self.init_snmp_data(varbind)
if is_mib_resolved(varbind_id):
group_key = get_group_key(mib, oid, index)
- if group_key not in metrics:
- indexes = extract_indexes(index)
- metrics[group_key] = {
- "metrics": {},
- "fields": {},
- "indexes": indexes,
- }
- if mapping:
- metrics[group_key]["profiles"] = []
+ self.handle_groupkey_without_metrics(group_key, index, mapping, metrics)
try:
- snmp_val = varbind[1]
- snmp_type = type(snmp_val).__name__
-
- metric_type = map_metric_type(snmp_type, snmp_val)
- metric_value = value_as_best(snmp_val.prettyPrint())
-
- index_number = extract_index_number(index)
- metric_value = fill_empty_value(index_number, metric_value, target)
-
- profile = None
- if mapping:
- profile = mapping.get(
- varbind_id.replace('"', ""),
- mapping.get(f"{mib}::{metric}", mapping.get(mib)),
- )
- # when varbind name differs from mib-family,
- # we are checking if there's any key that includes this mib to get profile
- if not profile:
- key = [
- prof
- for mib_map, prof in mapping.items()
- if mib in mib_map
- ]
- if key:
- profile = key[0]
- if profile and "__" in profile:
- profile = profile.split("__")[0]
+ metric_type, metric_value = self.set_metrics_index(
+ index, target, varbind
+ )
+
+ profile = self.set_profile_name(mapping, metric, mib, varbind_id)
if metric_value == "No more variables left in this MIB View":
continue
- if metric_type in MTYPES and (isinstance(metric_value, float)):
- metrics[group_key]["metrics"][f"{mib}.{metric}"] = {
- "time": time.time(),
- "type": metric_type,
- "value": metric_value,
- "oid": oid,
- }
- if profile and profile not in metrics[group_key]["profiles"]:
- metrics[group_key]["profiles"].append(profile)
- else:
- metrics[group_key]["fields"][f"{mib}.{metric}"] = {
- "time": time.time(),
- "type": metric_type,
- "value": metric_value,
- "oid": oid,
- }
+ self.handle_metrics(
+ group_key,
+ metric,
+ metric_type,
+ metric_value,
+ metrics,
+ mib,
+ oid,
+ profile,
+ )
except Exception:
logger.exception(
f"Exception processing data from {target} {varbind}"
)
else:
- found, mib = self.is_mib_known(varbind_id, oid, target)
- if mib and mib not in remotemibs:
- remotemibs.append(mib)
+ found = self.find_new_mibs(oid, remotemibs, target, varbind_id)
if found:
retry = True
break
return retry, remotemibs, metrics
+
+ def find_new_mibs(self, oid, remotemibs, target, varbind_id):
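+ # track MIB modules that may resolve this varbind; a known MIB makes the
+ # caller load it and retry processing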
+ found, mib = self.is_mib_known(varbind_id, oid, target)
+ if mib and mib not in remotemibs:
+ remotemibs.append(mib)
+ return found
+
+ def handle_metrics(
+ self, group_key, metric, metric_type, metric_value, metrics, mib, oid, profile
+ ):
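+ # numeric values of a known metric type are stored as metrics (and tagged
+ # with the matched profile); everything else is preserved as a field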
+ if metric_type in MTYPES and (isinstance(metric_value, float)):
+ metrics[group_key]["metrics"][f"{mib}.{metric}"] = {
+ "time": time.time(),
+ "type": metric_type,
+ "value": metric_value,
+ "oid": oid,
+ }
+ if profile and profile not in metrics[group_key]["profiles"]:
+ metrics[group_key]["profiles"].append(profile)
+ else:
+ metrics[group_key]["fields"][f"{mib}.{metric}"] = {
+ "time": time.time(),
+ "type": metric_type,
+ "value": metric_value,
+ "oid": oid,
+ }
+
+ def set_profile_name(self, mapping, metric, mib, varbind_id):
+ """
+ Finds the profile name based on the passed varbind id, metric and mib.
+ """
+ profile = None
+ if mapping:
+ profile = mapping.get(
+ varbind_id.replace('"', ""),
+ mapping.get(f"{mib}::{metric}", mapping.get(mib)),
+ )
+ # when the varbind name differs from the MIB family, check whether any
+ # mapping key contains this MIB and use its profile
+ profile = self.match_mapping_to_profile(mapping, mib, profile)
+ profile = self.clean_profile_name(profile)
+ return profile
+
+ def clean_profile_name(self, profile):
+ if profile and "__" in profile:
+ profile = profile.split("__")[0]
+ return profile
+
+ def match_mapping_to_profile(self, mapping, mib, profile):
+ if not profile:
+ key = [prof for mib_map, prof in mapping.items() if mib in mib_map]
+ if key:
+ profile = key[0]
+ return profile
+
+ def set_metrics_index(self, index, target, varbind):
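+ # derive the metric type from the pysnmp value class, coerce the
+ # prettyPrint()-ed value and let fill_empty_value patch empty readings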
+ snmp_val = varbind[1]
+ snmp_type = type(snmp_val).__name__
+ metric_type = map_metric_type(snmp_type, snmp_val)
+ metric_value = value_as_best(snmp_val.prettyPrint())
+ index_number = extract_index_number(index)
+ metric_value = fill_empty_value(index_number, metric_value, target)
+ return metric_type, metric_value
+
+ def handle_groupkey_without_metrics(self, group_key, index, mapping, metrics):
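+ # lazily create the per-group container; "profiles" is only tracked when
+ # a profile mapping was supplied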
+ if group_key not in metrics:
+ indexes = extract_indexes(index)
+ metrics[group_key] = {
+ "metrics": {},
+ "fields": {},
+ "indexes": indexes,
+ }
+ if mapping:
+ metrics[group_key]["profiles"] = []
+
+ def init_snmp_data(self, varbind):
+ mib, metric, index = varbind[0].getMibSymbol()
+ varbind_id = varbind[0].prettyPrint()
+ oid = str(varbind[0].getOid())
+ return index, metric, mib, oid, varbind_id
diff --git a/splunk_connect_for_snmp/traps.py b/splunk_connect_for_snmp/traps.py
index ba0d34ba8..1b567d0de 100644
--- a/splunk_connect_for_snmp/traps.py
+++ b/splunk_connect_for_snmp/traps.py
@@ -18,6 +18,7 @@
from pysnmp.proto.api import v2c
+from splunk_connect_for_snmp.common.hummanbool import human_bool
from splunk_connect_for_snmp.snmp.auth import get_secret_value
with suppress(ImportError, OSError):
@@ -27,13 +28,14 @@
import asyncio
import os
+import sys
from typing import Any, Dict
import yaml
from celery import Celery, chain
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
-from pysnmp.carrier.asyncio.dgram import udp
+from pysnmp.carrier.asyncio.dgram import udp, udp6
from pysnmp.entity import config, engine
from pysnmp.entity.rfc3413 import ntfrcv
@@ -48,11 +50,40 @@
SECURITY_ENGINE_ID_LIST = os.getenv("SNMP_V3_SECURITY_ENGINE_ID", "80003a8c04").split(
","
)
+IPv6_ENABLED = human_bool(os.getenv("IPv6_ENABLED", "false").lower())
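+# IPv6_ENABLED toggles the extra UDP/IPv6 trap listener configured in main()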
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
+PYSNMP_DEBUG = os.getenv("PYSNMP_DEBUG", "")
+
+logger = logging.getLogger(__name__)
+
+formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
+handler = logging.StreamHandler(sys.stdout)
+handler.setFormatter(formatter)
+handler.setLevel(getattr(logging, LOG_LEVEL))
+# the logger itself must also be set below the default WARNING level,
+# otherwise the debug/info calls below never reach the handler
+logger.setLevel(getattr(logging, LOG_LEVEL))
+
+logger.addHandler(handler)
+
+
+if PYSNMP_DEBUG:
+ # Usage: PYSNMP_DEBUG=dsp,msgproc,io
+
+ # List of available debug flags:
+ # io, dsp, msgproc, secmod, mibbuild, mibview, mibinstrum, acl, proxy, app, all
+
+ from pysnmp import debug
+
+ debug_flags = list(debug.flagMap.keys())
+ enabled_debug_flags = [
+ debug_flag.strip()
+ for debug_flag in PYSNMP_DEBUG.split(",")
+ if debug_flag.strip() in debug_flags
+ ]
+
+ if enabled_debug_flags:
+ debug.setLogger(
+ debug.Debug(*enabled_debug_flags, options={"loggerName": logger})
+ )
-logging.basicConfig(
- level=getattr(logging, LOG_LEVEL), format="%(asctime)s %(levelname)s %(message)s"
-)
# //using rabbitmq as the message broker
app = Celery("sc4snmp_traps")
@@ -68,7 +99,7 @@
def cb_fun(
snmp_engine, state_reference, context_engine_id, context_name, varbinds, cb_ctx
):
- logging.debug(
+ logger.debug(
'Notification from ContextEngineId "%s", ContextName "%s"'
% (context_engine_id.prettyPrint(), context_name.prettyPrint())
)
@@ -94,7 +125,7 @@ def cb_fun(
# Callback function for logging traps authentication errors
def authentication_observer_cb_fun(snmp_engine, execpoint, variables, contexts):
- logging.error(
+ logger.error(
f"Security Model failure for device {variables.get('transportAddress', None)}: "
f"{variables.get('statusInformation', {}).get('errorIndication', None)}"
)
@@ -135,6 +166,14 @@ def main():
udp.domainName,
udp.UdpTransport().openServerMode(("0.0.0.0", 2162)),
)
+
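+ # with IPv6 enabled, open an additional server-mode listener on the IPv6
+ # wildcard address alongside the IPv4 listener on 2162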
+ if IPv6_ENABLED:
+ config.addTransport(
+ snmp_engine,
+ udp6.domainName,
+ udp6.Udp6Transport().openServerMode(("::", 2163)),
+ )
+
with open(CONFIG_PATH, encoding="utf-8") as file:
config_base = yaml.safe_load(file)
idx = 0
@@ -154,13 +193,13 @@ def main():
priv_key = get_secret_value(location, "privKey", required=False)
auth_protocol = get_secret_value(location, "authProtocol", required=False)
- logging.debug(f"authProtocol: {auth_protocol}")
+ logger.debug(f"authProtocol: {auth_protocol}")
auth_protocol = AuthProtocolMap.get(auth_protocol.upper(), "NONE")
priv_protocol = get_secret_value(
location, "privProtocol", required=False, default="NONE"
)
- logging.debug(f"privProtocol: {priv_protocol}")
+ logger.debug(f"privProtocol: {priv_protocol}")
priv_protocol = PrivProtocolMap.get(priv_protocol.upper(), "NONE")
for security_engine_id in SECURITY_ENGINE_ID_LIST:
@@ -173,7 +212,7 @@ def main():
privKey=priv_key,
securityEngineId=v2c.OctetString(hexValue=security_engine_id),
)
- logging.debug(
+ logger.debug(
f"V3 users: {username} auth {auth_protocol} authkey {len(auth_key)*'*'} privprotocol {priv_protocol} "
f"privkey {len(priv_key)*'*'} securityEngineId {len(security_engine_id)*'*'}"
)
diff --git a/test/common/test_inventory_processor.py b/test/common/test_inventory_processor.py
index e4b337191..24cf83bf9 100644
--- a/test/common/test_inventory_processor.py
+++ b/test/common/test_inventory_processor.py
@@ -13,10 +13,16 @@
mock_inventory_only_address = """address
54.234.85.76"""
+mock_inventory_only_address_ipv6 = """address
+2001:0db8:ac10:fe01::0001"""
+
mock_inventory_host_same_as_in_group = """address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete
group1,,2c,public,,,1805,group_profile,False,False
0.0.0.0,,2c,public,,,1805,solo_profile1,False,False
-0.0.0.0,1161,2c,public,,,1805,solo_profile2,False,False"""
+0.0.0.0,1161,2c,public,,,1805,solo_profile2,False,False
+2001:0db8:ac10:fe01::0001,,2c,public,,,1805,solo_profile3,False,False
+2001:0db8:ac10:fe01:0000:0000:0000:0001,1166,2c,public,,,1805,solo_profile4,False,False
+"""
class TestInventoryProcessor(TestCase):
@@ -47,10 +53,30 @@ def test_transform_key_to_address(self):
self.assertEqual(("123.0.0.1", 777), transform_key_to_address("123.0.0.1:777"))
self.assertEqual(("123.0.0.1", 161), transform_key_to_address("123.0.0.1:161"))
self.assertEqual(("123.0.0.1", 161), transform_key_to_address("123.0.0.1"))
+ self.assertEqual(
+ ("2001:0db8:ac10:fe01:0000:0000:0000:0001", 161),
+ transform_key_to_address("2001:0db8:ac10:fe01:0000:0000:0000:0001:161"),
+ )
+ self.assertEqual(
+ ("2001:0db8:ac10:fe01:0000:0000:0000:0001", 333),
+ transform_key_to_address("2001:0db8:ac10:fe01:0000:0000:0000:0001:333"),
+ )
+ self.assertEqual(
+ ("2001:0db8:ac10:fe01::0001", 161),
+ transform_key_to_address("2001:0db8:ac10:fe01::0001:161"),
+ )
def test_transform_address_to_key(self):
self.assertEqual(transform_address_to_key("127.0.0.1", 333), "127.0.0.1:333")
- self.assertEqual(transform_address_to_key("127.0.0.1", 161), "127.0.0.1")
+ self.assertEqual(transform_address_to_key("127.0.0.1", 161), "127.0.0.1:161")
+ self.assertEqual(
+ transform_address_to_key("2001:0db8:ac10:fe01::0001", 161),
+ "2001:0db8:ac10:fe01::0001:161",
+ )
+ self.assertEqual(
+ transform_address_to_key("2001:0db8:ac10:fe01:0000:0000:0000:0001", 333),
+ "2001:0db8:ac10:fe01:0000:0000:0000:0001:333",
+ )
def test_return_hosts_from_deleted_groups_one_host(self):
previous_groups = {
@@ -80,6 +106,34 @@ def test_return_hosts_from_deleted_groups_one_host(self):
["1.1.1.1:162"],
)
+ def test_return_hosts_from_deleted_groups_one_host_ipv6(self):
+ previous_groups = {
+ "group1": [
+ {"address": "2001:0db8:ac10:fe01::0001", "port": 161},
+ {"address": "2001:0db8:bc10:fe03:0000:0000:0000:0001", "port": 999},
+ ],
+ "switches": [
+ {"address": "fd02::ae84:454f:3e03:4c80", "port": 161},
+ {"address": "fd01::bc60:000f:4e02:3c70", "port": 162},
+ ],
+ }
+ new_groups = {
+ "group1": [
+ {"address": "2001:0db8:ac10:fe01::0001", "port": 161},
+ {"address": "2001:0db8:bc10:fe03:0000:0000:0000:0001", "port": 999},
+ ],
+ "switches": [{"address": "fd02::ae84:454f:3e03:4c80", "port": 161}],
+ }
+
+ self.assertEqual(
+ return_hosts_from_deleted_groups(
+ previous_groups,
+ new_groups,
+ {"group1": {"port": 161}, "switches": {"port": 161}},
+ ),
+ ["fd01::bc60:000f:4e02:3c70:162"],
+ )
+
def test_return_hosts_from_deleted_groups_whole_group(self):
previous_groups = {
"group1": [
@@ -104,7 +158,7 @@ def test_return_hosts_from_deleted_groups_whole_group(self):
new_groups,
{"group1": 161, "switches": 161},
),
- ["12.22.23.33", "1.1.1.1:162"],
+ ["12.22.23.33:161", "1.1.1.1:162"],
)
def test_return_hosts_from_deleted_groups_one_host_and_group(self):
@@ -128,7 +182,7 @@ def test_return_hosts_from_deleted_groups_one_host_and_group(self):
new_groups,
{"group1": 161, "switches": 161},
),
- ["123.0.0.1", "178.8.8.1:999", "1.1.1.1:162"],
+ ["123.0.0.1:161", "178.8.8.1:999", "1.1.1.1:162"],
)
def test_return_hosts_empty(self):
@@ -283,6 +337,17 @@ def test_process_line_group(self):
inventory_processor.process_line(source_record)
inventory_processor.get_group_hosts.assert_called_with(source_record, "group1")
+ @patch(
+ "builtins.open",
+ new_callable=mock_open,
+ read_data=mock_inventory_only_address_ipv6,
+ )
+ def test_process_line_host_ipv6(self, m_inventory):
+ source_record = {"address": "2001:0db8:ac10:fe01::0001"}
+ inventory_processor = InventoryProcessor(Mock(), Mock(), Mock())
+ inventory_processor.get_all_hosts()
+ self.assertEqual(inventory_processor.inventory_records, [source_record])
+
@mock.patch(
"splunk_connect_for_snmp.common.collection_manager.CONFIG_FROM_MONGO",
False,
@@ -302,6 +367,7 @@ def test_ignore_line_host_configured_in_group(self, m_load_element):
"group1": [
{"address": "0.0.0.0", "port": "161"},
{"address": "127.0.0.1", "port": "161"},
+ {"address": "2001:0db8:ac10:fe01::0001", "port": "161"},
]
}
]
@@ -335,6 +401,19 @@ def test_ignore_line_host_configured_in_group(self, m_load_element):
"delete": "False",
"group": "group1",
},
+ {
+ "address": "2001:0db8:ac10:fe01::0001",
+ "port": "161",
+ "version": "2c",
+ "community": "public",
+ "secret": "",
+ "security_engine": "",
+ "walk_interval": "1805",
+ "profiles": "group_profile",
+ "smart_profiles": "False",
+ "delete": "False",
+ "group": "group1",
+ },
{
"address": "0.0.0.0",
"port": "1161",
@@ -347,6 +426,18 @@ def test_ignore_line_host_configured_in_group(self, m_load_element):
"smart_profiles": "False",
"delete": "False",
},
+ {
+ "address": "2001:0db8:ac10:fe01:0000:0000:0000:0001",
+ "port": "1166",
+ "version": "2c",
+ "community": "public",
+ "secret": "",
+ "security_engine": "",
+ "walk_interval": "1805",
+ "profiles": "solo_profile4",
+ "smart_profiles": "False",
+ "delete": "False",
+ },
]
inventory_processor.get_all_hosts()
self.assertEqual(expected, inventory_processor.inventory_records)
@@ -374,39 +465,37 @@ def test_return_walk_profile_more_than_one(self):
def test_return_walk_profile_no_walk_in_inventory(self):
inventory_profiles = ["generic_switch"]
inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock())
- self.assertEqual(
+ self.assertIsNone(
inventory_record_manager.return_walk_profile(
self.profiles, inventory_profiles
- ),
- None,
+ )
)
def test_return_walk_profile_no_walk_in_config(self):
inventory_profiles = ["generic_switch", "walk2"]
inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock())
- self.assertEqual(
+ self.assertIsNone(
inventory_record_manager.return_walk_profile(
self.profiles, inventory_profiles
- ),
- None,
+ )
)
def test_return_walk_profile_no_config(self):
inventory_profiles = ["generic_switch", "walk2"]
inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock())
- self.assertEqual(
- inventory_record_manager.return_walk_profile({}, inventory_profiles), None
+ self.assertIsNone(
+ inventory_record_manager.return_walk_profile({}, inventory_profiles)
)
def test_return_walk_profile_no_config_no_inventory(self):
inventory_profiles = []
inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock())
- self.assertEqual(
- inventory_record_manager.return_walk_profile({}, inventory_profiles), None
+ self.assertIsNone(
+ inventory_record_manager.return_walk_profile({}, inventory_profiles)
)
def test_return_walk_profile_no_inventory(self):
inventory_record_manager = InventoryRecordManager(Mock(), Mock(), Mock())
- self.assertEqual(
- inventory_record_manager.return_walk_profile(self.profiles, []), None
+ self.assertIsNone(
+ inventory_record_manager.return_walk_profile(self.profiles, [])
)
diff --git a/test/inventory/test_loader.py b/test/inventory/test_loader.py
index a31c013ab..c970626e7 100644
--- a/test/inventory/test_loader.py
+++ b/test/inventory/test_loader.py
@@ -152,13 +152,13 @@ def test_walk_task_for_port_161(self):
result = gen_walk_task(inventory_record)
- self.assertEqual("sc4snmp;192.68.0.1;walk", result["name"])
+ self.assertEqual("sc4snmp;192.68.0.1:161;walk", result["name"])
self.assertEqual("splunk_connect_for_snmp.snmp.tasks.walk", result["task"])
- self.assertEqual("192.68.0.1", result["target"])
+ self.assertEqual("192.68.0.1:161", result["target"])
self.assertEqual([], result["args"])
self.assertEqual(
{
- "address": "192.68.0.1",
+ "address": "192.68.0.1:161",
"profile": None,
"chain_of_tasks_expiry_time": chain_of_tasks_expiry_time,
},
@@ -250,10 +250,10 @@ def test_load_new_record_small_walk(
periodic_obj_mock = Mock()
m_taskManager.return_value = periodic_obj_mock
m_load_profiles.return_value = profiles
- self.assertEqual(False, load())
+ self.assertFalse(load())
self.assertEqual(
{
- "address": "192.168.0.1",
+ "address": "192.168.0.1:161",
"profile": "walk2",
"chain_of_tasks_expiry_time": 120,
},
@@ -305,7 +305,7 @@ def test_load_new_record(
periodic_obj_mock = Mock()
m_taskManager.return_value = periodic_obj_mock
m_load_profiles.return_value = default_profiles
- self.assertEqual(False, load())
+ self.assertFalse(load())
periodic_obj_mock.manage_task.assert_called_with(**expected_managed_task)
@@ -354,7 +354,7 @@ def test_load_modified_record(
periodic_obj_mock = Mock()
m_taskManager.return_value = periodic_obj_mock
m_load_profiles.return_value = default_profiles
- self.assertEqual(False, load())
+ self.assertFalse(load())
periodic_obj_mock.manage_task.assert_called_with(**expected_managed_task)
@@ -405,7 +405,7 @@ def test_load_unchanged_record(
periodic_obj_mock.did_expiry_time_change.return_value = False
m_migrate.return_value = False
m_load_profiles.return_value = default_profiles
- self.assertEqual(False, load())
+ self.assertFalse(load())
periodic_obj_mock.manage_task.assert_not_called()
@@ -458,7 +458,7 @@ def test_load_unchanged_record_with_new_expiry_time(
m_taskManager.return_value = periodic_obj_mock
periodic_obj_mock.did_expiry_time_change.return_value = True
m_load_profiles.return_value = default_profiles
- self.assertEqual(False, load())
+ self.assertFalse(load())
periodic_obj_mock.manage_task.assert_called_with(**expected_managed_task)
@@ -504,7 +504,7 @@ def test_ignoring_comment(
m_taskManager.return_value = periodic_obj_mock
m_taskManager.get_chain_of_task_expiry.return_value = 180
m_load_profiles.return_value = default_profiles
- self.assertEqual(False, load())
+ self.assertFalse(load())
m_mongo_collection.assert_not_called()
periodic_obj_mock.manage_task.assert_not_called()
@@ -550,16 +550,16 @@ def test_deleting_record(
periodic_obj_mock = Mock()
m_taskManager.return_value = periodic_obj_mock
m_load_profiles.return_value = default_profiles
- self.assertEqual(False, load())
+ self.assertFalse(load())
- periodic_obj_mock.delete_all_tasks_of_host.assert_called_with("192.168.0.1")
+ periodic_obj_mock.delete_all_tasks_of_host.assert_called_with("192.168.0.1:161")
m_delete.assert_called_with({"address": "192.168.0.1", "port": 161})
calls = m_remove.call_args_list
self.assertEqual(2, len(calls))
- self.assertEqual(({"address": "192.168.0.1"},), calls[0].args)
- self.assertEqual(({"address": "192.168.0.1"},), calls[1].args)
+ self.assertEqual(({"address": "192.168.0.1:161"},), calls[0].args)
+ self.assertEqual(({"address": "192.168.0.1:161"},), calls[1].args)
@mock.patch(
"splunk_connect_for_snmp.common.inventory_processor.CONFIG_FROM_MONGO", False
@@ -606,7 +606,7 @@ def test_deleting_record_non_default_port(
periodic_obj_mock = Mock()
m_taskManager.return_value = periodic_obj_mock
m_load_profiles.return_value = default_profiles
- self.assertEqual(False, load())
+ self.assertFalse(load())
periodic_obj_mock.delete_all_tasks_of_host.assert_called_with("192.168.0.1:345")
m_delete.assert_called_with({"address": "192.168.0.1", "port": 345})
@@ -669,11 +669,11 @@ def test_inventory_errors(
m_manage_task.side_effect = Exception("Boom!")
m_load_profiles.return_value = default_profiles
- self.assertEqual(True, load())
+ self.assertTrue(load())
def test_transform_address_to_key_161(self):
- self.assertEqual(transform_address_to_key("127.0.0.1", 161), "127.0.0.1")
- self.assertEqual(transform_address_to_key("127.0.0.1", "161"), "127.0.0.1")
+ self.assertEqual(transform_address_to_key("127.0.0.1", 161), "127.0.0.1:161")
+ self.assertEqual(transform_address_to_key("127.0.0.1", "161"), "127.0.0.1:161")
def test_transform_address_to_key(self):
self.assertEqual(transform_address_to_key("127.0.0.1", 32), "127.0.0.1:32")
diff --git a/test/snmp/test_auth.py b/test/snmp/test_auth.py
index 771b78292..fe21be8e2 100644
--- a/test/snmp/test_auth.py
+++ b/test/snmp/test_auth.py
@@ -1,5 +1,5 @@
from unittest import TestCase
-from unittest.mock import Mock, mock_open, patch
+from unittest.mock import MagicMock, Mock, mock_open, patch
from pysnmp.entity.config import (
usmAesBlumenthalCfb192Protocol,
@@ -18,6 +18,7 @@
get_auth_v3,
get_secret_value,
get_security_engine_id,
+ setup_transport_target,
)
from splunk_connect_for_snmp.snmp.exceptions import SnmpActionError
@@ -269,8 +270,8 @@ def test_get_auth_v3_noauthnopriv(self, m_get_secret_value, m_exists):
result = get_auth_v3(logger, ir, snmpEngine)
security_engine_result = OctetString(hexValue="80003a8c04")
self.assertEqual("secret1", result.userName)
- self.assertEqual(None, result.authKey)
- self.assertEqual(None, result.privKey)
+ self.assertIsNone(result.authKey)
+ self.assertIsNone(result.privKey)
self.assertEqual("noAuthNoPriv", result.securityLevel)
self.assertEqual(usmNoAuthProtocol, result.authProtocol)
self.assertEqual(usmNoPrivProtocol, result.privProtocol)
@@ -299,7 +300,7 @@ def test_get_auth_v3_authnopriv(self, m_get_secret_value, m_exists):
security_engine_result = OctetString(hexValue="80003a8c04")
self.assertEqual("secret1", result.userName)
self.assertEqual("secret2", result.authKey)
- self.assertEqual(None, result.privKey)
+ self.assertIsNone(result.privKey)
self.assertEqual("authNoPriv", result.securityLevel)
self.assertEqual(usmHMAC128SHA224AuthProtocol, result.authProtocol)
self.assertEqual(usmNoPrivProtocol, result.privProtocol)
@@ -335,3 +336,27 @@ def test_get_auth_3(self, m_get_auth):
ir.version = "3"
get_auth(Mock(), ir, Mock())
m_get_auth.assert_called()
+
+ @patch("splunk_connect_for_snmp.snmp.auth.Udp6TransportTarget")
+ @patch("splunk_connect_for_snmp.snmp.auth.UdpTransportTarget")
+ def test_setup_transport_target_ipv4(
+ self, m_setup_udp_transport_target, m_setup_udp6_transport_target
+ ):
+ ir.address = "127.0.0.1"
+ ir.port = 161
+ m_setup_udp_transport_target.return_value = "UDP4"
+ m_setup_udp6_transport_target.return_value = "UDP6"
+ transport = setup_transport_target(ir)
+ self.assertEqual("UDP4", transport)
+
+ @patch("splunk_connect_for_snmp.snmp.auth.Udp6TransportTarget")
+ @patch("splunk_connect_for_snmp.snmp.auth.UdpTransportTarget")
+ def test_setup_transport_target_ipv6(
+ self, m_setup_udp_transport_target, m_setup_udp6_transport_target
+ ):
+ ir.address = "2001:0db8:ac10:fe01::0001"
+ ir.port = 161
+ m_setup_udp_transport_target.return_value = "UDP4"
+ m_setup_udp6_transport_target.return_value = "UDP6"
+ transport = setup_transport_target(ir)
+ self.assertEqual("UDP6", transport)
diff --git a/test/snmp/test_do_work.py b/test/snmp/test_do_work.py
index f3cb03cf6..92c9132d4 100644
--- a/test/snmp/test_do_work.py
+++ b/test/snmp/test_do_work.py
@@ -29,7 +29,7 @@ class TestDoWork(TestCase):
@patch("mongolock.MongoLock.release", MagicMock())
@patch("splunk_connect_for_snmp.snmp.auth.get_auth", None)
@patch("splunk_connect_for_snmp.snmp.manager.get_context_data", MagicMock())
- @patch("splunk_connect_for_snmp.snmp.manager.UdpTransportTarget", MagicMock())
+ @patch("splunk_connect_for_snmp.snmp.manager.setup_transport_target", MagicMock())
def test_do_work_no_work_to_do(self):
poller = Poller.__new__(Poller)
poller.last_modified = 1609675634
@@ -57,7 +57,7 @@ def test_do_work_no_work_to_do(self):
@patch("mongolock.MongoLock.release", MagicMock())
@patch("splunk_connect_for_snmp.snmp.auth.get_auth", None)
@patch("splunk_connect_for_snmp.snmp.manager.get_context_data", MagicMock())
- @patch("splunk_connect_for_snmp.snmp.manager.UdpTransportTarget", MagicMock())
+ @patch("splunk_connect_for_snmp.snmp.manager.setup_transport_target", MagicMock())
@patch("splunk_connect_for_snmp.snmp.manager.bulkCmd")
@patch("splunk_connect_for_snmp.snmp.manager.getCmd")
@patch("splunk_connect_for_snmp.common.collection_manager.ProfilesManager")
@@ -93,7 +93,7 @@ def test_do_work_bulk(self, load_profiles, getCmd, bulkCmd):
@patch("mongolock.MongoLock.release", MagicMock())
@patch("splunk_connect_for_snmp.snmp.auth.get_auth", None)
@patch("splunk_connect_for_snmp.snmp.manager.get_context_data", MagicMock())
- @patch("splunk_connect_for_snmp.snmp.manager.UdpTransportTarget", MagicMock())
+ @patch("splunk_connect_for_snmp.snmp.manager.setup_transport_target", MagicMock())
@patch("splunk_connect_for_snmp.snmp.manager.bulkCmd")
@patch("splunk_connect_for_snmp.snmp.manager.getCmd")
@patch(
@@ -136,7 +136,7 @@ def test_do_work_get(self, load_profiles, getCmd, bulkCmd):
@patch("mongolock.MongoLock.release", MagicMock())
@patch("splunk_connect_for_snmp.snmp.auth.get_auth", None)
@patch("splunk_connect_for_snmp.snmp.manager.get_context_data", MagicMock())
- @patch("splunk_connect_for_snmp.snmp.manager.UdpTransportTarget", MagicMock())
+ @patch("splunk_connect_for_snmp.snmp.manager.setup_transport_target", MagicMock())
@patch("splunk_connect_for_snmp.snmp.manager.bulkCmd")
@patch("splunk_connect_for_snmp.snmp.manager.getCmd")
@patch(
diff --git a/test/snmp/test_mibs.py b/test/snmp/test_mibs.py
index ccda7d957..142592c1f 100644
--- a/test/snmp/test_mibs.py
+++ b/test/snmp/test_mibs.py
@@ -52,3 +52,23 @@ def test_exception_during_loading(self):
poller.builder = Mock()
poller.builder.loadModules.side_effect = error.MibLoadError()
poller.load_mibs(["a"])
+
+ def test_find_new_mibs_is_found(self):
+ poller = Poller.__new__(Poller)
+ poller.is_mib_known = Mock()
+ poller.is_mib_known.return_value = (True, "SNMPv2-SMI")
+ remote_mib = ["SNMPv2-SMI"]
+ found = poller.find_new_mibs("1.3.6.1.3.4", remote_mib, "address", "some ID")
+
+ self.assertTrue(found)
+ self.assertEqual(remote_mib, ["SNMPv2-SMI"])
+
+ def test_find_new_mibs_add_new(self):
+ poller = Poller.__new__(Poller)
+ poller.is_mib_known = Mock()
+ poller.is_mib_known.return_value = (False, "SNMPv2-SMI")
+ remote_mib = ["RFC1213-MIB"]
+ found = poller.find_new_mibs("1.3.6.1.3.4", remote_mib, "address", "some ID")
+
+ self.assertEqual(remote_mib, ["RFC1213-MIB", "SNMPv2-SMI"])
+ self.assertFalse(found)
diff --git a/test/snmp/test_process_snmp_data.py b/test/snmp/test_process_snmp_data.py
index 9b586a7d2..b3efe092b 100644
--- a/test/snmp/test_process_snmp_data.py
+++ b/test/snmp/test_process_snmp_data.py
@@ -1,5 +1,5 @@
from unittest import TestCase
-from unittest.mock import Mock, patch
+from unittest.mock import MagicMock, Mock, patch
from splunk_connect_for_snmp.snmp.manager import Poller
@@ -315,3 +316,185 @@ def test_metrics_with_profile(
},
metrics,
)
+
+ @patch("time.time", MagicMock(return_value=12345))
+ def test_handle_metrics_add_fields(self):
+ poller = Poller.__new__(Poller)
+ group_key = "KEY"
+ metric = "sysUpTime"
+ metric_type = "ObjectIdentifier"
+ metric_value = "1234567"
+ metrics = {"KEY": {"metrics": {}, "fields": {}, "profiles": []}}
+ mib = "SNMPv2-MIB"
+ oid = "1.3.6.1.2.1.1.3.0"
+ profile = "some_profile"
+
+ poller.handle_metrics(
+ group_key, metric, metric_type, metric_value, metrics, mib, oid, profile
+ )
+ self.assertEqual(
+ metrics,
+ {
+ "KEY": {
+ "metrics": {},
+ "fields": {
+ "SNMPv2-MIB.sysUpTime": {
+ "time": 12345,
+ "type": "ObjectIdentifier",
+ "value": "1234567",
+ "oid": "1.3.6.1.2.1.1.3.0",
+ }
+ },
+ "profiles": [],
+ },
+ },
+ )
+
+ @patch("time.time", MagicMock(return_value=12345))
+ def test_handle_metrics_add_metrics_float(self):
+ poller = Poller.__new__(Poller)
+ group_key = "KEY"
+ metric = "sysNum"
+ metric_type = "g"
+ metric_value = 123.0
+ metrics = {"KEY": {"metrics": {}, "fields": {}, "profiles": []}}
+ mib = "SNMPv2-MIB"
+ oid = "1.3.6.1.2.1.1.3.0"
+ profile = "some_profile"
+
+ poller.handle_metrics(
+ group_key, metric, metric_type, metric_value, metrics, mib, oid, profile
+ )
+ self.assertEqual(
+ metrics,
+ {
+ "KEY": {
+ "fields": {},
+ "metrics": {
+ "SNMPv2-MIB.sysNum": {
+ "time": 12345,
+ "type": "g",
+ "value": 123.0,
+ "oid": "1.3.6.1.2.1.1.3.0",
+ }
+ },
+ "profiles": ["some_profile"],
+ }
+ },
+ )
+
+ def test_set_profile_name_matching_varbind_id(self):
+ poller = Poller.__new__(Poller)
+
+ mapping = {
+ "SNMPv2-MIB::sysDescr.0": "BaseDeviceData",
+ "SNMPv2-MIB::sysName.0": "BaseData",
+ }
+ metric = "sysDescr"
+ mib = "SNMPv2-MIB"
+ varbind_id = "SNMPv2-MIB::sysDescr.0"
+ profile = poller.set_profile_name(mapping, metric, mib, varbind_id)
+ self.assertEqual(profile, "BaseDeviceData")
+
+ def test_set_profile_name_matching_metric(self):
+ poller = Poller.__new__(Poller)
+ mapping = {
+ "SNMPv2-MIB::sysDescr.0": "BaseDeviceData",
+ "SNMPv2-MIB::sysName.0": "BaseData",
+ }
+ metric = "sysDescr"
+ mib = "SNMPv2-MIB"
+ varbind_id = ""
+ profile = poller.set_profile_name(mapping, metric, mib, varbind_id)
+ self.assertEqual(profile, "BaseDeviceData")
+
+ def test_set_profile_name_matching_mib(self):
+ poller = Poller.__new__(Poller)
+ mapping = {
+ "SNMPv2-MIB::sysDescr.0": "BaseDeviceData",
+ "SNMPv2-MIB::sysName.0": "BaseData",
+ }
+ metric = "sysData"
+ mib = "SNMPv2-MIB"
+ varbind_id = ""
+ profile = poller.set_profile_name(mapping, metric, mib, varbind_id)
+ self.assertEqual(profile, "BaseDeviceData")
+
+ def test_set_profile_name_no_match(self):
+ poller = Poller.__new__(Poller)
+ mapping = {
+ "SNMPv2-MIB::sysDescr.0": "BaseDeviceData",
+ "SNMPv2-MIB::sysName.0": "BaseData",
+ }
+ metric = "sysData"
+ mib = "SNMPv3-MIB"
+ varbind_id = ""
+ profile = poller.set_profile_name(mapping, metric, mib, varbind_id)
+ self.assertIsNone(profile)
+
+ def test_match_mapping_to_profile_no_match(self):
+ poller = Poller.__new__(Poller)
+ mapping = {
+ "IF-MIB::sysDescr.0": "BaseDevice",
+ "SNMPv2-MIB::sysDescr.0": "BaseDeviceData",
+ "SNMPv2-MIB::sysName.0": "BaseData",
+ }
+ mib = "SNMPv3-MIB"
+ profile = None
+ profile = poller.match_mapping_to_profile(mapping, mib, profile)
+ self.assertIsNone(profile)
+
+ def test_match_mapping_to_profile_match(self):
+ poller = Poller.__new__(Poller)
+ mapping = {
+ "IF-MIB::sysDescr.0": "BaseDevice",
+ "SNMPv2-MIB::sysDescr.0": "BaseDeviceData",
+ "SNMPv2-MIB::sysName.0": "BaseData",
+ }
+ mib = "SNMPv2-MIB"
+ profile = None
+ profile = poller.match_mapping_to_profile(mapping, mib, profile)
+ self.assertEqual(profile, "BaseDeviceData")
+
+ @patch(
+ "splunk_connect_for_snmp.snmp.manager.extract_indexes",
+ MagicMock(return_value=[1]),
+ )
+ def test_handle_groupkey_without_metrics(self):
+ poller = Poller.__new__(Poller)
+ mapping = {
+ "IF-MIB::sysDescr.0": "BaseDevice",
+ "SNMPv2-MIB::sysDescr.0": "BaseDeviceData",
+ "SNMPv2-MIB::sysName.0": "BaseData",
+ }
+ group_key = "SNMPv2-MIB::tuple=int=0"
+ index = MagicMock()
+ metrics = {}
+ poller.handle_groupkey_without_metrics(group_key, index, mapping, metrics)
+ self.assertEqual(
+ metrics,
+ {
+ "SNMPv2-MIB::tuple=int=0": {
+ "indexes": [1],
+ "fields": {},
+ "metrics": {},
+ "profiles": [],
+ }
+ },
+ )
+
+ @patch(
+ "splunk_connect_for_snmp.snmp.manager.extract_indexes",
+ MagicMock(return_value=[1]),
+ )
+ def test_handle_groupkey_without_metrics_no_mapping(self):
+ poller = Poller.__new__(Poller)
+ mapping = {}
+ group_key = "SNMPv2-MIB::tuple=int=0"
+ index = MagicMock()
+ metrics = {}
+ poller.handle_groupkey_without_metrics(group_key, index, mapping, metrics)
+ self.assertEqual(
+ metrics,
+ {"SNMPv2-MIB::tuple=int=0": {"indexes": [1], "fields": {}, "metrics": {}}},
+ )