diff --git a/.github/workflows/ci-docker-deployment.yaml b/.github/workflows/ci-docker-deployment.yaml index cf7c3356f..dc6666419 100644 --- a/.github/workflows/ci-docker-deployment.yaml +++ b/.github/workflows/ci-docker-deployment.yaml @@ -14,7 +14,7 @@ jobs: strategy: matrix: python-version: - - 3.10 + - 3.13 steps: - name: Check out code uses: actions/checkout@v4 diff --git a/.github/workflows/ci-main.yaml b/.github/workflows/ci-main.yaml index e6432bfeb..bfa958b9f 100644 --- a/.github/workflows/ci-main.yaml +++ b/.github/workflows/ci-main.yaml @@ -70,7 +70,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: "3.10" + python-version: "3.13" - uses: pre-commit/action@v3.0.1 trivy-scan: @@ -102,7 +102,7 @@ jobs: strategy: matrix: python-version: - - "3.10" + - "3.13" steps: - uses: actions/checkout@v4 - name: Setup python @@ -197,7 +197,7 @@ jobs: - name: Setup python uses: actions/setup-python@v5 with: - python-version: "3.10" + python-version: "3.13" - name: Install docker compose run: | # Add Docker's official GPG key: diff --git a/.github/workflows/ci-ui-tests.yaml b/.github/workflows/ci-ui-tests.yaml index 7530cf1c7..fda33191a 100644 --- a/.github/workflows/ci-ui-tests.yaml +++ b/.github/workflows/ci-ui-tests.yaml @@ -64,7 +64,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.10" + python-version: "3.13" - name: remove not used docker images run: | diff --git a/.github/workflows/mike.yaml b/.github/workflows/mike.yaml index 67171f31d..2d56a555a 100644 --- a/.github/workflows/mike.yaml +++ b/.github/workflows/mike.yaml @@ -40,7 +40,7 @@ jobs: token: "${{ secrets.PAT_CLATOOL }}" - uses: actions/setup-python@v5 with: - python-version: "3.10" + python-version: "3.13" - name: Upload Docs run: | sudo apt update diff --git a/.github/workflows/offline-installation.yaml b/.github/workflows/offline-installation.yaml index f15362233..57f1153df 100644 --- a/.github/workflows/offline-installation.yaml +++ b/.github/workflows/offline-installation.yaml @@ -14,7 +14,7 @@ jobs: strategy: matrix: python-version: - - "3.10" + - "3.13" steps: - name: Check out code uses: actions/checkout@v4 diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d4a85a99..1bc412446 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - Implemented automatic data migration from Bitnami deployments (PVC reuse) - Enabled AOF persistence by default for data durability - add CounterBasedGauge64 and ZeroBasedCounter64 as metrics types +- add SNMP-enabled device discovery feature ### Fixes - fix problem with service rendering when `traps.service.usemetallb` is set to false diff --git a/Dockerfile b/Dockerfile index c66bba577..e221d5926 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,11 @@ -FROM python:3.10-alpine AS base +FROM python:3.13-alpine AS base ENV PYTHONFAULTHANDLER=1 \ PYTHONHASHSEED=random \ PYTHONUNBUFFERED=1 RUN apk add -U git sqlite-dev RUN pip install --upgrade setuptools pip +RUN apk add --no-cache nmap RUN mkdir /app WORKDIR /app diff --git a/charts/splunk-connect-for-snmp/Chart.lock b/charts/splunk-connect-for-snmp/Chart.lock index 64ccea2b0..2ae3a4a0b 100644 --- a/charts/splunk-connect-for-snmp/Chart.lock +++ b/charts/splunk-connect-for-snmp/Chart.lock @@ -5,5 +5,5 @@ dependencies: - name: mibserver repository: https://pysnmp.github.io/mibs/charts/ version: 1.15.25 -digest: sha256:747fcedec83bf0d80600166a021b35436d8d2ea877b60e9a43044ed2140cf1c5 -generated: "2025-10-13T12:15:04.255986+02:00" +digest: 
sha256:204f5bac63adfed27167a2d1f76682400e98ff9dd39bb319357633de25f91525 +generated: "2025-11-06T09:07:30.615857862Z" diff --git a/charts/splunk-connect-for-snmp/templates/NOTES.txt b/charts/splunk-connect-for-snmp/templates/NOTES.txt index 94fd81372..ee7ac9d34 100644 --- a/charts/splunk-connect-for-snmp/templates/NOTES.txt +++ b/charts/splunk-connect-for-snmp/templates/NOTES.txt @@ -1,2 +1,3 @@ Default walk no longer calls full oid tree, instead it is collecting only 'SNMPv2-MIB'. -If you want to call full oid for the devices, you have to set enableFullWalk flag to true. \ No newline at end of file +If you want to call full oid for the devices, you have to set enableFullWalk flag to true. +If the discovery feature is enabled, the time required may increase depending on the number of devices in the specified subnet. diff --git a/charts/splunk-connect-for-snmp/templates/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/_helpers.tpl index 272726007..bd723388d 100644 --- a/charts/splunk-connect-for-snmp/templates/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/_helpers.tpl @@ -96,6 +96,17 @@ Whether enable polling {{- end -}} {{- end }} +{{/* +Whether enable discovery +*/}} +{{- define "splunk-connect-for-snmp.discovery.enable" -}} +{{- if .Values.discovery.enabled }} +{{- printf "true" }} +{{- else }} +{{- printf "false" }} +{{- end -}} +{{- end }} + {{- /* Generate Redis environment variables for application pods */ -}} diff --git a/charts/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/charts/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..39460f953 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "splunk-connect-for-snmp.name" . }}-discovery-config +data: + discovery-config.yaml: | +{{- if .Values.discovery }} +{{ .Values.discovery | toYaml | indent 4 }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/discovery/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/discovery/_helpers.tpl new file mode 100644 index 000000000..91f35da53 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/discovery/_helpers.tpl @@ -0,0 +1,50 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "splunk-connect-for-snmp.discovery.name" -}} +{{- default (printf "%s-%s" .Chart.Name "discovery") .Values.discovery.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "splunk-connect-for-snmp.discovery.fullname" -}} +{{- if .Values.discovery.fullnameOverride }} +{{- .Values.discovery.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default (printf "%s-%s" .Chart.Name "discovery") .Values.discovery.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Not using it anywhere +*/}} +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "splunk-connect-for-snmp.discovery.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "splunk-connect-for-snmp.discovery.selectorLabels" -}} +app.kubernetes.io/name: {{ include "splunk-connect-for-snmp.discovery.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "splunk-connect-for-snmp.discovery.labels" -}} +{{ include "splunk-connect-for-snmp.discovery.selectorLabels" . }} +{{ include "splunk-connect-for-snmp.labels" . }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/discovery/job.yaml b/charts/splunk-connect-for-snmp/templates/discovery/job.yaml new file mode 100644 index 000000000..a92d01832 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/discovery/job.yaml @@ -0,0 +1,78 @@ +{{- if eq (include "splunk-connect-for-snmp.discovery.enable" .) "true" }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "splunk-connect-for-snmp.discovery.fullname" . }} + labels: + {{- include "splunk-connect-for-snmp.discovery.labels" . | nindent 4 }} +spec: + ttlSecondsAfterFinished: 300 + template: + metadata: + # {{- with .Values.inventory.podAnnotations }} + # annotations: + # {{- toYaml . | nindent 8 }} + # {{- end }} + + labels: + {{- include "splunk-connect-for-snmp.discovery.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-discovery + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + ["discovery"] + env: + {{- if .Values.redis.auth.enabled }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.redis.auth.existingSecret }} + name: {{ .Values.redis.auth.existingSecret }} + key: {{ .Values.redis.auth.existingSecretPasswordKey | default "password" }} + {{- else }} + name: {{ .Release.Name }}-redis-secret + key: password + {{- end }} + {{- end }} + - name: REDIS_HOST + value: {{ .Release.Name }}-redis + - name: REDIS_PORT + value: "6379" + - name: REDIS_DB + value: "1" + - name: CELERY_DB + value: "0" + - name: DISCOVERY_CONFIG_PATH + value: /app/discovery/discovery-config.yaml + - name: LOG_LEVEL + value: {{ .Values.discovery.logLevel | default "INFO" }} + - name: CHAIN_OF_TASKS_EXPIRY_TIME + value: {{ .Values.scheduler.tasksExpiryTime | quote }} + - name: CELERY_TASK_TIMEOUT + value: {{ .Values.worker.taskTimeout | quote}} + volumeMounts: + - name: discovery-config + mountPath: "/app/discovery" + readOnly: true + - name: tmp + mountPath: "/tmp/" + readOnly: false + + volumes: + # # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: discovery-config + configMap: + name: {{ include "splunk-connect-for-snmp.name" . 
}}-discovery-config + items: + - key: "discovery-config.yaml" + path: "discovery-config.yaml" + - name: tmp + emptyDir: {} + restartPolicy: OnFailure +{{- end -}} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index c00d2c992..bf3584ca2 100644 --- a/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "beat", ] env: + - name: USER + value: {{ .Values.deploymentUser | default "sc4snmp" | quote }} - name: CONFIG_PATH value: /app/config/config.yaml {{- include "splunk-connect-for-snmp.redis-env" . | nindent 12 }} diff --git a/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml b/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml index 307785694..24ce9f755 100644 --- a/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/sim/deployment.yaml @@ -45,6 +45,8 @@ spec: imagePullPolicy: {{ .Values.sim.pullPolicy | default "IfNotPresent" }} args: ["--config=/config/otel-collector-config.yaml"] env: + - name: USER + value: {{ .Values.deploymentUser | default "sc4snmp" | quote }} {{- include "splunk-connect-for-snmp.redis-env" . | nindent 12 }} - name: signalfxToken valueFrom: diff --git a/charts/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/charts/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 75660e398..70e76bfde 100644 --- a/charts/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/charts/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -27,5 +27,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: {{ .Values.deploymentUser | default "sc4snmp" | quote }} + restartPolicy: Never diff --git a/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml b/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml index 479d01438..763d379bc 100644 --- a/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -46,6 +46,8 @@ spec: "trap" ] env: + - name: USER + value: {{ .Values.deploymentUser | default "sc4snmp" | quote }} - name: CONFIG_PATH value: /app/config/config.yaml - name: MONGO_URI diff --git a/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl b/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl index 766115441..ade5cecf4 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl +++ b/charts/splunk-connect-for-snmp/templates/worker/_helpers.tpl @@ -59,6 +59,10 @@ app.kubernetes.io/name: {{ include "splunk-connect-for-snmp.worker.name" . }}-fl app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} +{{- define "splunk-connect-for-snmp.worker.discovery.selectorLabels" -}} +app.kubernetes.io/name: {{ include "splunk-connect-for-snmp.worker.name" . }}-discovery +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} {{/* Common labels @@ -88,6 +92,11 @@ Common labels {{ include "splunk-connect-for-snmp.labels" . }} {{- end }} +{{- define "splunk-connect-for-snmp.worker.discovery.labels" -}} +{{ include "splunk-connect-for-snmp.worker.discovery.selectorLabels" . }} +{{ include "splunk-connect-for-snmp.labels" . 
}} +{{- end }} + {{- define "environmental-variables" -}} - name: CONFIG_PATH value: /app/config/config.yaml @@ -118,6 +127,8 @@ Common labels value: {{ .Values.worker.disableMongoDebugLogging | quote }} - name: UDP_CONNECTION_TIMEOUT value: {{ .Values.worker.udpConnectionTimeout | default "3" | quote }} +- name: UDP_CONNECTION_RETRIES + value: {{ .Values.worker.udpConnectionRetries | default "5" | quote }} - name: MAX_OID_TO_PROCESS value: {{ .Values.poller.maxOidToProcess | default "70" | quote }} - name: MAX_REPETITIONS @@ -210,4 +221,11 @@ Common labels {{ else }} value: "false" {{- end }} +{{- end }} + +{{- define "environmental-variables-discovery" -}} +- name: WORKER_CONCURRENCY + value: {{ .Values.worker.discovery.concurrency | default "4" | quote }} +- name: PREFETCH_COUNT + value: {{ .Values.worker.discovery.prefetch | default "30" | quote }} {{- end }} \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml new file mode 100644 index 000000000..f6a576c2c --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml @@ -0,0 +1,97 @@ +{{- if eq (include "splunk-connect-for-snmp.discovery.enable" .) "true" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-discovery + labels: + {{- include "splunk-connect-for-snmp.worker.discovery.labels" . | nindent 4 }} +spec: + {{- if not .Values.worker.discovery.autoscaling.enabled }} + replicas: {{ .Values.worker.discovery.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "splunk-connect-for-snmp.worker.discovery.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "splunk-connect-for-snmp.worker.discovery.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "splunk-connect-for-snmp.serviceAccountName" . }} + securityContext: + fsGroup: 10001 + containers: + - name: {{ .Chart.Name }}-discovery + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + [ + "celery", "worker-discovery", + ] + env: + - name: USER + value: {{ .Values.deploymentUser | default "sc4snmp" | quote }} + {{- include "environmental-variables" . | nindent 12 }} + - name: DISCOVERY_FOLDER_PATH + value: /app/discovery + - name: CELERY_TASK_TIMEOUT + value: {{ .Values.worker.taskTimeout | quote}} + - name: IPv6_ENABLED + {{- if .Values.discovery.ipv6Enabled}} + value: "true" + {{ else }} + value: "false" + {{- end }} + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + - name: discovery-volume + mountPath: /app/discovery + {{- range (.Values.discovery).usernameSecrets }} + - name: {{ . }}-snmpv3-secrets + mountPath: /app/secrets/snmpv3/{{ . }} + readOnly: true + {{- end }} + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. 
+ name: {{ include "splunk-connect-for-snmp.name" . }}-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} + - name: discovery-volume + hostPath: + path: {{ .Values.discovery.discoveryPath }} + type: Directory + {{- range (.Values.discovery).usernameSecrets }} + - name: {{ . }}-snmpv3-secrets + secret: + secretName: {{ . }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/discovery/hpa.yaml b/charts/splunk-connect-for-snmp/templates/worker/discovery/hpa.yaml new file mode 100644 index 000000000..d6d0a0a21 --- /dev/null +++ b/charts/splunk-connect-for-snmp/templates/worker/discovery/hpa.yaml @@ -0,0 +1,24 @@ +{{- if and ( eq (include "splunk-connect-for-snmp.discovery.enable" .) "true" ) (eq ( toString .Values.worker.discovery.autoscaling.enabled) "true") }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-discovery + labels: + {{- include "splunk-connect-for-snmp.worker.discovery.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "splunk-connect-for-snmp.worker.fullname" . }}-discovery + minReplicas: {{ .Values.worker.discovery.autoscaling.minReplicas }} + maxReplicas: {{ .Values.worker.discovery.autoscaling.maxReplicas }} + metrics: + {{- if .Values.worker.discovery.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.worker.discovery.autoscaling.targetCPUUtilizationPercentage | default 80 }} + {{- end }} +{{- end }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/flower/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/flower/deployment.yaml index 6ec6d264d..19c5eb897 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/flower/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/worker/flower/deployment.yaml @@ -40,6 +40,8 @@ spec: "celery", "flower", ] env: + - name: USER + value: {{ .Values.deploymentUser | default "sc4snmp" | quote }} {{- include "environmental-variables" . | nindent 12 }} {{ include "splunk-connect-for-snmp.redis-env" . | nindent 12 }} ports: diff --git a/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index 89db601ec..f88834783 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -46,6 +46,8 @@ spec: "celery", "worker-poller", ] env: + - name: USER + value: {{ .Values.deploymentUser | default "sc4snmp" | quote }} {{- include "environmental-variables" . | nindent 12 }} {{- include "environmental-variables-poller" . | nindent 12 }} {{- if .Values.worker.livenessProbe.enabled }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 775a76948..1e04a77d2 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -45,6 +45,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: {{ .Values.deploymentUser | default "sc4snmp" | quote }} {{- include "environmental-variables" . 
| nindent 12 }} {{- include "environmental-variables-sender" . | nindent 12 }} {{- if .Values.splunk.mtls.enabled }} diff --git a/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index e5bf4021f..adfa02dac 100644 --- a/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/charts/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -46,6 +46,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: {{ .Values.deploymentUser | default "sc4snmp" | quote }} {{- include "environmental-variables" . | nindent 12 }} {{- include "environmental-variables-trap" . | nindent 12 }} volumeMounts: diff --git a/charts/splunk-connect-for-snmp/values.schema.json b/charts/splunk-connect-for-snmp/values.schema.json index a0a702a4b..eb32a757b 100644 --- a/charts/splunk-connect-for-snmp/values.schema.json +++ b/charts/splunk-connect-for-snmp/values.schema.json @@ -10,7 +10,8 @@ "poller", "worker", "inventory", - "traps" + "traps", + "discovery" ], "title": "Values", "additionalProperties": false, @@ -34,6 +35,9 @@ } } }, + "deploymentUser" : { + "type": "string" + }, "imagePullSecrets": { "type": "array" }, @@ -593,6 +597,69 @@ } } }, + "discovery": { + "type": "object", + "additionalProperties": false, + "properties": { + "replicaCount": { + "type": "integer" + }, + "concurrency": { + "type": "integer" + }, + "prefetch": { + "type": "integer" + }, + "autoscaling": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "minReplicas": { + "type": "integer" + }, + "maxReplicas": { + "type": "integer" + }, + "targetCPUUtilizationPercentage": { + "type": "integer" + } + } + }, + "resources": { + "type": "object", + "additionalProperties": false, + "properties": { + "limits": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": ["integer", "string"] + }, + "memory": { + "type": ["integer", "string"] + } + } + }, + "requests": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": ["integer", "string"] + }, + "memory": { + "type": ["integer", "string"] + } + } + } + } + } + } + }, "livenessProbe": { "type": "object", "additionalProperties": false, @@ -663,6 +730,9 @@ "udpConnectionTimeout": { "type": "integer" }, + "udpConnectionRetries": { + "type": "integer" + }, "ignoreEmptyVarbinds": { "type": "boolean" }, @@ -904,6 +974,88 @@ } } } + }, + "discovery": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "ipv6Enabled": { + "type": "boolean" + }, + "usernameSecrets": { + "type": "array" + }, + "discoveryPath": { + "type": "string" + }, + "logLevel": { + "type": "string" + }, + "autodiscovery": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^[^0-9].*": { + "type": "object", + "additionalProperties": false, + "properties": { + "frequency": { + "type": "integer" + }, + "delete_already_discovered": { + "type": "boolean" + }, + "network_address": { + "type": "string" + }, + "version": { + "type": "string", + "enum": ["1", "2c", "3"] + }, + "secret": { + "type": ["string", "null"] + }, + "community": { + "type": ["string", "null"] + }, + "port": { + "type": "integer", + "minimum": 1, + "maximum": 65535 + }, + "security_engine": { + "type": "string" + }, + "device_rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + 
"type": "string" + }, + "patterns": { + "type": "string" + }, + "group": { + "type": "string" + } + }, + "required": [ + "name", + "patterns", + "group" + ] + } + } + } + } + } + } + } } } } \ No newline at end of file diff --git a/charts/splunk-connect-for-snmp/values.yaml b/charts/splunk-connect-for-snmp/values.yaml index f45579177..5b4d66db7 100644 --- a/charts/splunk-connect-for-snmp/values.yaml +++ b/charts/splunk-connect-for-snmp/values.yaml @@ -351,6 +351,32 @@ worker: # the resources requests for sender worker container requests: cpu: 250m + + # The discovery worker handles auto discovery of snmp enabled devices and create CSV file for it + discovery: + # number of the discovery replicas when autoscaling is set to false + replicaCount: 1 + # minimum number of threads in a pod + concurrency: 4 + # how many tasks are consumed from the queue at once + prefetch: 30 + autoscaling: + # enabling autoscaling for discovery worker pods + enabled: false + # minimum number of running discovery worker pods when autoscaling is enabled + minReplicas: 2 + # maximum number of running discovery worker pods when autoscaling is enabled + maxReplicas: 10 + # CPU % threshold that must be exceeded on discovery worker pods to spawn another replica + targetCPUUtilizationPercentage: 80 + resources: + # the resources limits for discovery worker container + limits: + cpu: 500m + # the resources requests for discovery worker container + requests: + cpu: 250m + # Liveness probes are used in Kubernetes to know when a pod is alive or dead. # A pod can be in a dead state for a number of reasons; # the application could be crashed, some error in the application etc. @@ -403,6 +429,8 @@ worker: podAntiAffinity: soft # udpConnectionTimeout timeout in seconds for SNMP operations udpConnectionTimeout: 3 + # udpConnectionRetries number of retries for SNMP operations + udpConnectionRetries: 5 # in case of seeing "Empty SNMP response message" this variable can be set to true ignoreEmptyVarbinds: false @@ -512,6 +540,8 @@ serviceAccount: # This parameter allows to use SC4SNMP for older version of Kubernetes that doesn't support autoscaling/v2 useDeprecatedAPI: false +# A user name to set in the deployment environment. +deploymentUser: "sc4snmp" ############################################################################# ### Please do not modify below values, unless you know what you're doing! ### ############################################################################# @@ -661,3 +691,13 @@ redis: enabled: true commonAnnotations: {} +discovery: + # Enables discovering SNMP enabled device and create CSV file. + enabled: false + # list of kubernetes secrets name that will be used for discovery + # https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/discovery-configuration/#define-usernamesecrets + usernameSecrets: [] + # Enabled device detection using IPv6 subnet. 
+ ipv6Enabled: false + # logging level, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL + logLevel: "INFO" diff --git a/dashboard/dashboard.xml b/dashboard/dashboard.xml index c4588552a..33f5b7e35 100644 --- a/dashboard/dashboard.xml +++ b/dashboard/dashboard.xml @@ -185,6 +185,81 @@ + + + SNMP discovery status + + + all + * + * + key + key + + index=* sourcetype="*:container:splunk-connect-for-snmp-*" "Scheduler: Sending due task sc4snmp;*;discovery" | rex field=_raw "Sending due task sc4snmp;(?<key>.+);discovery" | stats count by key + -24h@h + now + + + + In case of unsuccessful discovery status, please copy SPL query from this chart and find failed tasks. Explanation of error log messages you can find at + https://splunk.github.io/splunk-connect-for-snmp/main/troubleshooting/discovery-issues/ + + index=* sourcetype="*:container:splunk-connect-for-snmp-*" splunk_connect_for_snmp.discovery.tasks.discovery "$discovery_status_key$" | rex field=_raw "Task splunk_connect_for_snmp.*\[*\] (?<status>\w+)" | where status != "received" | timechart count by status + -24h@h + now + 5m + delay + + + + + + + + + + + + + search?q=index%3D*%20sourcetype%3D%22*%3Acontainer%3Asplunk-connect-for-snmp-*%22%20splunk_connect_for_snmp.discovery.tasks.discovery%20%22$discovery_status_key$%22%20%7C%20rex%20field%3D_raw%20%22Task%20splunk_connect_for_snmp.*%5C%5B*%5C%5D%20(%3F%3Cstatus%3E%5Cw%2B)%22%20%7C%20where%20status%20!%3D%20%22received%22&earliest=-24h@h&latest=now + + + + + SNMP schedule of discovery tasks + + + all + * + * + key + key + + index=* sourcetype="*:container:splunk-connect-for-snmp-*" "Scheduler: Sending due task sc4snmp;*;discovery" | rex field=_raw "Sending due task sc4snmp;(?<key>.+);discovery" | stats count by key + -24h@h + now + + + + Using this chart you can understand when SC4SNMP scheduled discovery for your Discovery Key last time. The process works if it runs regularly. 
+ + index=* sourcetype="*:container:splunk-connect-for-snmp-*" Scheduler: Sending due task sc4snmp;*;discovery "$discovery_key$" | timechart count + -24h@h + now + 5m + delay + + + + + + + search?q=index%3D*%20sourcetype%3D%22*%3Acontainer%3Asplunk-connect-for-snmp-*%22%20Scheduler%3A%20Sending%20due%20task%20sc4snmp%3B*%3Bdiscovery%20%22$discovery_key$%22&earliest=-24h@h&latest=now + + + + SNMP send to Splunk status diff --git a/docker_compose/.env b/docker_compose/.env index e633c1581..d2b58e319 100644 --- a/docker_compose/.env +++ b/docker_compose/.env @@ -4,8 +4,10 @@ SC4SNMP_TAG="1.14.2-beta.5" SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH= TRAPS_CONFIG_FILE_ABSOLUTE_PATH= INVENTORY_FILE_ABSOLUTE_PATH= +DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH= COREFILE_ABS_PATH= SC4SNMP_VERSION="1.14.2-beta.5" +DEPLOYMENT_USER= # Network configuration COREDNS_ADDRESS=172.28.0.255 @@ -16,6 +18,11 @@ IPAM_GATEWAY=172.28.0.1 IPAM_SUBNET_IPv6=fd02::/64 IPAM_GATEWAY_IPv6=fd02::1 +# Discovery +DISCOVERY_ENABLE=true +DISCOVERY_LOG_LEVEL=INFO +DISCOVERY_PATH= + # Dependencies images COREDNS_IMAGE=coredns/coredns COREDNS_TAG=1.11.1 @@ -52,7 +59,9 @@ IGNORE_NOT_INCREASING_OIDS= WORKER_LOG_LEVEL=INFO WORKER_DISABLE_MONGO_DEBUG_LOGGING=true UDP_CONNECTION_TIMEOUT=3 +UDP_CONNECTION_RETRIES=5 MAX_OID_TO_PROCESS=70 +CELERY_TASK_TIMEOUT=2400 MAX_REPETITIONS=10 # Worker Poller @@ -85,6 +94,15 @@ WORKER_TRAP_MEMORY_LIMIT=500M WORKER_TRAP_CPU_RESERVATIONS=0.5 WORKER_TRAP_MEMORY_RESERVATIONS=250M +# Worker Discovery +WORKER_DISCOVERY_CONCURRENCY=4 +PREFETCH_DISCOVERY_COUNT=30 +WORKER_DISCOVERY_REPLICAS=1 +WORKER_DISCOVERY_CPU_LIMIT=1 +WORKER_DISCOVERY_MEMORY_LIMIT=500M +WORKER_DISCOVERY_CPU_RESERVATIONS=0.5 +WORKER_DISCOVERY_MEMORY_RESERVATIONS=250M + # Inventory configuration INVENTORY_LOG_LEVEL=INFO CHAIN_OF_TASKS_EXPIRY_TIME=500 diff --git a/docker_compose/docker-compose.yaml b/docker_compose/docker-compose.yaml index 8a180acff..85af0cf74 100644 --- a/docker_compose/docker-compose.yaml +++ b/docker_compose/docker-compose.yaml @@ -6,6 +6,7 @@ x-general_sc4snmp_data: &general_sc4snmp_data MIB_SOURCES: http://snmp-mibserver:8000/asn1/@mib@ MIB_INDEX: http://snmp-mibserver:8000/index.csv MIB_STANDARD: http://snmp-mibserver:8000/standard.txt + USER: ${DEPLOYMENT_USER:-sc4snmp} x-splunk_general_setup: &splunk_general_setup SPLUNK_HEC_HOST: ${SPLUNK_HEC_HOST} @@ -35,6 +36,7 @@ x-workers_general_setup: &workers_general_setup LOG_LEVEL: ${WORKER_LOG_LEVEL:-INFO} DISABLE_MONGO_DEBUG_LOGGING: ${WORKER_DISABLE_MONGO_DEBUG_LOGGING:-true} UDP_CONNECTION_TIMEOUT: ${UDP_CONNECTION_TIMEOUT:-3} + UDP_CONNECTION_RETRIES: ${UDP_CONNECTION_RETRIES:-5} MAX_OID_TO_PROCESS: ${MAX_OID_TO_PROCESS:-70} MAX_REPETITIONS: ${MAX_REPETITIONS:-10} PROFILES_RELOAD_DELAY: ${PROFILES_RELOAD_DELAY:-60} @@ -257,6 +259,52 @@ services: - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro - worker-flower-pysnmp-cache-volume:/.pysnmp/:rw - worker-flower-tmp:/tmp/:rw + discovery: + <<: [ *dns_and_networks, *dependency_and_restart_policy ] + image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest} + container_name: sc4snmp-discovery + command: [ discovery ] + environment: + <<: *general_sc4snmp_data + LOG_LEVEL: ${DISCOVERY_LOG_LEVEL:-INFO} + CHAIN_OF_TASKS_EXPIRY_TIME: ${CHAIN_OF_TASKS_EXPIRY_TIME:-500} + CELERY_TASK_TIMEOUT: ${CELERY_TASK_TIMEOUT:-2400} + DISCOVERY_CONFIG_PATH: /app/discovery/discovery-config.yaml + volumes: + - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro + - 
${DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH}:/app/discovery/discovery-config.yaml:ro
+      - discovery-pysnmp-cache-volume:/.pysnmp/:rw
+      - discovery-tmp:/tmp/:rw
+  worker-discovery:
+    <<: [ *dns_and_networks, *dependency_and_restart_policy ]
+    image: ${SC4SNMP_IMAGE}:${SC4SNMP_TAG:-latest}
+    command:
+      - celery
+      - worker-discovery
+    deploy:
+      mode: replicated
+      replicas: ${WORKER_DISCOVERY_REPLICAS:-1}
+      resources:
+        limits:
+          cpus: ${WORKER_DISCOVERY_CPU_LIMIT:-0.50}
+          memory: ${WORKER_DISCOVERY_MEMORY_LIMIT:-500M}
+        reservations:
+          cpus: ${WORKER_DISCOVERY_CPU_RESERVATIONS:-0.25}
+          memory: ${WORKER_DISCOVERY_MEMORY_RESERVATIONS:-250M}
+    environment:
+      <<: [ *general_sc4snmp_data, *pysnmp_debug, *ipv6 ]
+      SC4SNMP_VERSION: ${SC4SNMP_VERSION:-latest}
+      LOG_LEVEL: ${WORKER_LOG_LEVEL:-INFO}
+      UDP_CONNECTION_TIMEOUT: ${UDP_CONNECTION_TIMEOUT:-3}
+      UDP_CONNECTION_RETRIES: ${UDP_CONNECTION_RETRIES:-5}
+      WORKER_CONCURRENCY: ${WORKER_DISCOVERY_CONCURRENCY:-1}
+      PREFETCH_COUNT: ${PREFETCH_DISCOVERY_COUNT:-1}
+      DISCOVERY_FOLDER_PATH: /app/discovery
+    volumes:
+      - ${SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH}:/app/config/config.yaml:ro
+      - ${DISCOVERY_PATH}:/app/discovery/:rw
+      - worker-discovery-pysnmp-cache-volume:/.pysnmp/:rw
+      - worker-discovery-tmp:/tmp/:rw
 volumes:
   snmp-mibserver-tmp:
   inventory-tmp:
@@ -273,3 +321,8 @@
   worker-trap-pysnmp-cache-volume:
   worker-flower-tmp:
   worker-flower-pysnmp-cache-volume:
+  discovery-pysnmp-cache-volume:
+  discovery-tmp:
+  worker-discovery-pysnmp-cache-volume:
+  worker-discovery-tmp:
+  worker-discovery-volume:
diff --git a/docker_compose/manage_secrets.py b/docker_compose/manage_secrets.py
index 079d8890f..a88d83c66 100644
--- a/docker_compose/manage_secrets.py
+++ b/docker_compose/manage_secrets.py
@@ -4,7 +4,7 @@
 
 import ruamel.yaml
 
-SERVICE_SECRETS = ["worker-poller", "traps"]
+SERVICE_SECRETS = ["worker-poller", "traps", "worker-discovery"]
 
 DOCKER_COMPOSE = "docker-compose.yaml"
 
@@ -60,6 +60,7 @@ def create_secrets(
     secret_name: str,
     make_change_in_worker_poller: bool,
     make_change_in_traps: bool,
+    make_change_in_worker_discovery: bool,
 ):
     """
     Function to create secrets in .env and docker-compose.yaml files
@@ -68,6 +69,7 @@ def create_secrets(
     @param secret_name: name of the secret
     @param make_change_in_worker_poller: flag indicating whether to add secrets to worker poller service
     @param make_change_in_traps: flag indicating whether to add secrets to traps service
+    @param make_change_in_worker_discovery: flag indicating whether to add secrets to discovery service
     """
     for k, v in variables.items():
         if k != "contextEngineId" and not v:
@@ -107,6 +109,13 @@ def create_secrets(
         else:
             traps_ready = True
 
+        if make_change_in_worker_discovery:
+            yaml_file, worker_discovery_ready = load_compose_worker_discovery(
+                new_secrets_in_workers, yaml_file
+            )
+        else:
+            worker_discovery_ready = True
+
         save_to_compose_files(
             path_to_compose_files,
             secret_name,
@@ -115,6 +124,7 @@ def create_secrets(
             traps_ready,
             variables,
             worker_poller_ready,
+            worker_discovery_ready,
         )
     except Exception as e:
         print(f"Problem with adding secrets. Error: {e}")
@@ -128,8 +138,9 @@ def save_to_compose_files(
     traps_ready,
     variables,
     worker_poller_ready,
+    worker_discovery_ready,
 ):
-    if secrets_ready and worker_poller_ready and traps_ready:
+    if secrets_ready and worker_poller_ready and traps_ready and worker_discovery_ready:
         # If all three files were loaded into dictionary and updated successfully,
         # save the latest configuration to files.
with open(os.path.join(path_to_compose_files, ".env"), "a") as file: @@ -157,6 +168,23 @@ def load_compose_traps(new_secrets_in_workers, yaml_file): return yaml_file, traps_ready +def load_compose_worker_discovery(new_secrets_in_workers, yaml_file): + # If the secret should be added to worker discovery, load docker-compose-worker-discovery.yaml to a dictionary and + # update "secrets" section. + try: + if "secrets" not in yaml_file["services"]["worker-discovery"]: + yaml_file["services"]["worker-discovery"]["secrets"] = [] + yaml_file["services"]["worker-discovery"]["secrets"].extend( + new_secrets_in_workers + ) + worker_discovery_ready = True + except Exception as e: + print(f"Problem with editing worker-discovery. Secret not added. Error {e}") + yaml_file = {} + worker_discovery_ready = False + return yaml_file, worker_discovery_ready + + def load_compose_worker_poller(new_secrets_in_workers, yaml_file): # If the secret should be added to worker poller, load docker-compose-worker-poller.yaml to a dictionary and # update "secrets" section. @@ -200,6 +228,7 @@ def delete_secrets( secret_name: str, make_change_in_worker_poller: bool, make_change_in_traps: bool, + make_change_in_worker_discovery: bool, ): """ Function to delete secrets from .env and docker-compose.yaml files @@ -208,6 +237,7 @@ def delete_secrets( @param secret_name: name of the secret @param make_change_in_worker_poller: flag indicating whether to delete secrets from worker poller service @param make_change_in_traps: flag indicating whether to delete secrets from traps service + @param make_change_in_worker_discovery: flag indicating whether to delete secrets from worker discovery service """ secrets = [] for key in variables.keys(): @@ -240,6 +270,16 @@ def delete_secrets( ) ) + if make_change_in_worker_discovery: + # filter out secrets destined for deletion + + yaml_file["services"]["worker-discovery"]["secrets"] = list( + filter( + lambda el: el["source"] not in secrets, + yaml_file["services"]["worker-discovery"]["secrets"], + ) + ) + except Exception as e: print(f"Problem with editing secrets section. Secret not added. 
Error: {e}") @@ -290,6 +330,9 @@ def main(): parser.add_argument( "--worker_poller", default="true", help="Add secret to worker poller" ) + parser.add_argument( + "--worker_discovery", default="true", help="Add secret to worker discovery" + ) parser.add_argument("--traps", default="true", help="Add secret to traps") parser.add_argument("--userName", default="", help="SNMPV3 username") parser.add_argument("--privProtocol", default="", help="SNMPV3 privProtocol") @@ -306,6 +349,7 @@ def main(): path_to_compose_files = args.path_to_compose make_change_in_worker_poller = human_bool(args.worker_poller) make_change_in_traps = human_bool(args.traps) + make_change_in_worker_discovery = human_bool(args.worker_discovery) # variables dictionary maps variables names stored inside a secret to their values variables = { @@ -332,6 +376,7 @@ def main(): secret_name, make_change_in_worker_poller, make_change_in_traps, + make_change_in_worker_discovery, ) except ValueError as e: print(e) @@ -342,6 +387,7 @@ def main(): secret_name, make_change_in_worker_poller, make_change_in_traps, + make_change_in_worker_discovery, ) diff --git a/docs/architecture/design.md b/docs/architecture/design.md index 3a5bee804..6706cb207 100644 --- a/docs/architecture/design.md +++ b/docs/architecture/design.md @@ -25,6 +25,7 @@ Diagram above present high level architecture of Splunk Connector for SNMP, it c - **Trap** - responsible for listening and receiving trap notifications from SNMP agents. The listener is always waiting for the messages coming on the specified port and passing them to the trap worker for further processing. +- **Discovery** - responsible for detecting SNMP-enabled devices within a given subnet. Celery is used to schedule and execute the discovery tasks, with Redis acting as the message broker. - **MIB Server** - responsible for serving MIBs to SNMP Workers and translating oids to varbinds. - **MongoDB** - used for storing configuration and state of the SC4SNMP. - **Inventory** - job used for updating the information about SC4SNMP configuration. It is run after every update to diff --git a/docs/dashboard.md b/docs/dashboard.md index 15f35ae70..182deaf5f 100644 --- a/docs/dashboard.md +++ b/docs/dashboard.md @@ -73,6 +73,18 @@ otherwise we will see information with another status. ![Trap dashboards](images/dashboard/trap_dashboard.png) +#### Discovery dashboards + +To check that discovery for your discovery key is working correctly, look at **SNMP schedule of discovery tasks** dashboard. +With this chart you can understand when SC4SNMP scheduled discovery for your discovery key last time. The process works if it runs regularly. + +After double-checking that SC4SNMP scheduled discovery tasks for your discovery key we need to be sure that discovery is working. +For that look at another dashboard **SNMP discovery status** and if everything is working you will see only **succeeded** status of discovery. +If something is going wrong you will see another status (like on screenshot), then use [troubleshooting docs +for that](troubleshooting/discovery-issues.md). + +![Discovery dashboards](images/dashboard/discovery_dashboard.png) + #### Other dashboards We also have tasks that will be a callback for walk and poll. For example **send** will publish result in Splunk. 
diff --git a/docs/discovery.md b/docs/discovery.md
new file mode 100644
index 000000000..5444b6077
--- /dev/null
+++ b/docs/discovery.md
@@ -0,0 +1,51 @@
+# Discovery
+
+## Purpose
+The SNMP Discovery feature in Splunk Connect for SNMP provides an automated way to identify SNMP-enabled devices within user-specified subnets. Instead of manually scanning networks or maintaining a static list of devices, users can now use this feature to generate an up-to-date list of IP addresses where SNMP is actively running.
+
+!!! info
+    The current implementation does not automatically integrate discovered devices into the polling pipeline. The discovered IPs are saved to an output file, which can then be reviewed or used manually to update your SNMP polling configuration.
+
+
+### This feature is useful when:
+- Visibility into which devices on the network have SNMP enabled is required.
+- A fast way to generate a list of devices that need further monitoring is required.
+
+## How It Works
+The discovery process consists of two main steps:
+
+### 1. List devices
+To begin, the system identifies all the devices within the defined subnet.
+
+### 2. SNMP Probing
+Once the list of devices is identified:
+
+- The system sends SNMP requests to each device using the credentials specified in the configuration (e.g., community strings or SNMPv3 secrets).
+- If the device responds successfully to an SNMP poll, the IP is considered SNMP-enabled.
+- All such devices, along with some details, are saved to a defined output file.
+
+This output can later be used to configure polling.
+
+### Multi-Subnet Support
+Multiple discovery jobs can be configured to run independently for different subnets. Each job can have its own frequency, SNMP version, credentials, and grouping logic. This makes it easy to scan different parts of your network separately.
+
+## Output Format
+After each discovery run, a file named `discovery_devices.csv` is generated in the path defined by `discoveryPath`. This file includes all successfully discovered SNMP devices and can be used in poller configuration. The CSV file contains fields such as key (discovery name), subnet, IP address, port, SNMP version, group, secret, and community.
+
+To use this feature, the user must provide a valid path where the CSV file will be created. Note that this is a single shared file, and all discovery jobs for different subnets update the same file.
+
+Example:
+
+```csv
+key,subnet,ip,port,version,group,secret,community
+discovery_version2c,10.202.4.200/30,10.202.4.202,161,2c,linux-group,,public
+```
+
+!!! info
+    This file serves as a reference and does not automatically update any active polling configuration. Users are expected to manually review and incorporate this list into their SNMP polling setup as needed.
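+
+As a rough illustration only (not part of SC4SNMP itself), the CSV can be post-processed with a few lines of Python, for example to pull out the devices of a single group; the file path and group name below are assumptions based on the example above:
+
+```python
+import csv
+
+# Read the discovery output and keep the devices assigned to one group.
+with open("discovery_devices.csv", newline="") as f:
+    devices = [row for row in csv.DictReader(f) if row["group"] == "linux-group"]
+
+for device in devices:
+    # Print address, port, version, and community as a starting point for
+    # building a polling configuration by hand.
+    print(device["ip"], device["port"], device["version"], device["community"])
+```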
+
+
+## Configuration
+To configure and run SNMP autodiscovery, refer to:
+
+- [Docker Compose discovery configuration](./dockercompose/11-discovery-configuration.md)
+- [Microk8s discovery configuration](./microk8s/configuration/discovery-configuration.md)
diff --git a/docs/dockercompose/11-discovery-configuration.md b/docs/dockercompose/11-discovery-configuration.md
new file mode 100644
index 000000000..11cc917ec
--- /dev/null
+++ b/docs/dockercompose/11-discovery-configuration.md
@@ -0,0 +1,63 @@
+# Discovery configuration
+
+Discovery configuration is stored in the `discovery-config.yaml` file. This file has the following sections:
+
+```yaml
+enabled:
+ipv6Enabled:
+autodiscovery:
+  discovery_key:
+    frequency:
+    delete_already_discovered:
+    network_address:
+    version:
+    community:
+    port:
+    device_rules:
+      - name:
+        patterns:
+        group:
+
+```
+
+- `enabled`: To enable or disable the discovery feature, set the `enabled` key. The default value is `false`.
+- `ipv6Enabled`: To enable IPv6 subnet scanning, set the `ipv6Enabled` key to `true`.
+
+!!! info
+    If `ipv6Enabled` is `false`, then no task will be created for a discovery key with an IPv6 network address.
+
+- `autodiscovery`: Discovery tasks are defined under the `autodiscovery` section. Each discovery task can target a specific subnet with its own SNMP version and settings.
+The discovery key (i.e., the task name) must start with a letter, not a number. This section is configured the same way as the `discovery.autodiscovery` section of `values.yaml`, which is described in the [discovery configuration](../microk8s/configuration/discovery-configuration.md) documentation.
+
+## Example of the configuration
+
+```yaml
+enabled: true
+ipv6Enabled: true
+autodiscovery:
+  discovery_version2c:
+    frequency: 86400
+    delete_already_discovered: true
+    network_address: 10.202.4.200/30
+    version: "2c"
+    community: "public"
+    port: 161
+    device_rules:
+      - name: "Linux servers"
+        patterns: "*linux*"
+        group: "linux-group"
+
+  discovery_version3:
+    frequency: 43200
+    delete_already_discovered: false
+    network_address: 10.202.4.200/30
+    version: "3"
+    port: 161
+    secret: sc4snmp-hlab-sha-aes
+    security_engine: "80001f8880e761866965756b6800000000"
+    device_rules:
+      - name: "Windows servers"
+        patterns: "*Windows*"
+        group: "windows-group"
+
+```
diff --git a/docs/dockercompose/2-download-package.md b/docs/dockercompose/2-download-package.md
index 6a00101aa..5e1dfb2ef 100644
--- a/docs/dockercompose/2-download-package.md
+++ b/docs/dockercompose/2-download-package.md
@@ -5,7 +5,7 @@ Package with docker compose configuration files (`docker_compose.zip`) can be do
 ## Configuration
 
 To configure the deployment, follow the instructions in [Inventory configuration](./3-inventory-configuration.md),
-[Scheduler configuration](./4-scheduler-configuration.md), [Traps configuration](./5-traps-configuration.md),
+[Scheduler configuration](./4-scheduler-configuration.md), [Traps configuration](./5-traps-configuration.md), [Discovery configuration](./11-discovery-configuration.md),
 [.env file configuration](./6-env-file-configuration.md), [SNMPv3 secrets](./7-snmpv3-secrets.md).
 
 ## Deploying the app
diff --git a/docs/dockercompose/6-env-file-configuration.md b/docs/dockercompose/6-env-file-configuration.md
index ce180b4ba..841e3e4a4 100644
--- a/docs/dockercompose/6-env-file-configuration.md
+++ b/docs/dockercompose/6-env-file-configuration.md
@@ -11,8 +11,10 @@ Inside the directory with the docker compose files, there is a `.env`. Variables
 | `SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH` | Absolute path to [scheduler-config.yaml](./4-scheduler-configuration.md) file |
 | `TRAPS_CONFIG_FILE_ABSOLUTE_PATH` | Absolute path to [traps-config.yaml](./5-traps-configuration.md) file |
 | `INVENTORY_FILE_ABSOLUTE_PATH` | Absolute path to [inventory.csv](./3-inventory-configuration.md) file |
+| `DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH` | Absolute path to [discovery-config.yaml](./11-discovery-configuration.md) file |
 | `COREFILE_ABS_PATH` | Absolute path to Corefile used by coreDNS.
Default Corefile can be found inside the `docker_compose` |
 | `SC4SNMP_VERSION` | Version of SC4SNMP |
+| `DEPLOYMENT_USER` | The user name set in the deployment environment. The default value is `sc4snmp`. |
 
 ## Network configuration
 
@@ -75,6 +77,8 @@ Inside the directory with the docker compose files, there is a `.env`. Variables
 | `IGNORE_NOT_INCREASING_OIDS` | Ignoring `occurred: OID not increasing` issues for hosts specified in the array, ex: IGNORE_NOT_INCREASING_OIDS=127.0.0.1:164,127.0.0.6 |
 | `WORKER_LOG_LEVEL` | Logging level of the workers, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL |
 | `UDP_CONNECTION_TIMEOUT` | Timeout in seconds for SNMP operations |
+| `UDP_CONNECTION_RETRIES` | Number of retries for SNMP operations |
+| `CELERY_TASK_TIMEOUT` | Timeout in seconds for a single Celery task |
 | `MAX_OID_TO_PROCESS` | Sometimes SNMP Agent cannot accept more than X OIDs per once, so if the error "TooBig" is visible in logs, decrease the number of MAX_OID_TO_PROCESS |
 | `MAX_REPETITIONS` | The amount of requested next oids in response for each of varbinds in one request sent |
 
@@ -114,6 +118,17 @@ Inside the directory with the docker compose files, there is a `.env`. Variables
 | `WORKER_TRAP_CPU_RESERVATIONS` | Dedicated cpu resources for worker trap container |
 | `WORKER_TRAP_MEMORY_RESERVATIONS` | Dedicated memory resources for worker trap container |
 
+### Worker Discovery
+
+| Variable | Description |
+|----------------------------------------|-------------------------------------------------------------------------------|
+| `WORKER_DISCOVERY_CONCURRENCY` | Minimum number of threads in the discovery container |
+| `PREFETCH_DISCOVERY_COUNT` | How many tasks are consumed from the queue at once in the discovery container |
+| `WORKER_DISCOVERY_REPLICAS` | Number of Docker replicas of the worker discovery container |
+| `WORKER_DISCOVERY_CPU_LIMIT` | Limit of cpu that worker discovery container can use |
+| `WORKER_DISCOVERY_MEMORY_LIMIT` | Limit of memory that worker discovery container can use |
+| `WORKER_DISCOVERY_CPU_RESERVATIONS` | Dedicated cpu resources for worker discovery container |
+| `WORKER_DISCOVERY_MEMORY_RESERVATIONS` | Dedicated memory resources for worker discovery container |
+
 ## Inventory
 
 | Variable | Description |
@@ -128,8 +143,17 @@ Inside the directory with the docker compose files, there is a `.env`. Variables
 | `SNMP_V3_SECURITY_ENGINE_ID` | SNMPv3 TRAPs require the configuration SNMP Engine ID of the TRAP sending application for the USM users table of the TRAP receiving application for each USM user, for example: SNMP_V3_SECURITY_ENGINE_ID=80003a8c04,aab123456 |
 | `INCLUDE_SECURITY_CONTEXT_ID` | Controls whether to add the context_engine_id field to v3 trap events |
 | `TRAPS_PORT` | External port exposed for traps server |
+
 ## Scheduler
 
 | Variable | Description |
 |-----------------------|---------------------------------------------------------------------------------------------------|
-| `SCHEDULER_LOG_LEVEL` | Logging level of the scheduler, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL |
\ No newline at end of file
+| `SCHEDULER_LOG_LEVEL` | Logging level of the scheduler, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL |
+
+## Discovery
+
+| Variable | Description |
+|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `DISCOVERY_ENABLE` | Enables the discovery feature |
+| `DISCOVERY_LOG_LEVEL` | Logging level of discovery, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL |
+| `DISCOVERY_PATH` | Absolute path on the local file system where the `discovery_devices.csv` file will be created. If the CSV file is not already present, a new file will be created. |
diff --git a/docs/dockercompose/7-snmpv3-secrets.md b/docs/dockercompose/7-snmpv3-secrets.md
index e29e8ed65..5c7c770bc 100644
--- a/docs/dockercompose/7-snmpv3-secrets.md
+++ b/docs/dockercompose/7-snmpv3-secrets.md
@@ -14,18 +14,19 @@ pip3 install ruamel.yaml
 
 To create a new secret, `manage_secrets.py` must be run with the following flags:
 
-| Flag | Description |
-|---------------------|--------------------------------------------------------------------------------|
-| `--secret_name` | New secret name |
-| `--path_to_compose` | Absolute path to directory with docker compose files |
-| `--worker_poller` | \[OPTIONAL\] Add new secrets to worker poller. Default value is set to 'true'. |
-| `--traps` | \[OPTIONAL\] Add new secrets to traps server. Default value is set to 'true'. |
-| `--userName` | SNMPv3 userName |
-| `--privProtocol` | SNMPv3 privProtocol |
-| `--privKey` | SNMPv3 privKey |
-| `--authProtocol` | SNMPv3 authProtocol |
-| `--authKey` | SNMPv3 authKey |
-| `--contextEngineId` | \[OPTIONAL\] SNMPv3 engine id |
+| Flag | Description |
+|---------------------|-----------------------------------------------------------------------------------|
+| `--secret_name` | New secret name |
+| `--path_to_compose` | Absolute path to directory with docker compose files |
+| `--worker_poller` | \[OPTIONAL\] Add new secrets to worker poller. Default value is set to 'true'. |
+| `--worker_discovery`| \[OPTIONAL\] Add new secrets to worker discovery. Default value is set to 'true'. |
+| `--traps` | \[OPTIONAL\] Add new secrets to traps server. Default value is set to 'true'. |
+| `--userName` | SNMPv3 userName |
+| `--privProtocol` | SNMPv3 privProtocol |
+| `--privKey` | SNMPv3 privKey |
+| `--authProtocol` | SNMPv3 authProtocol |
+| `--authKey` | SNMPv3 authKey |
+| `--contextEngineId` | \[OPTIONAL\] SNMPv3 engine id |
 
 This script, apart from updating configuration files, creates environmental variables with values of the secret at the
diff --git a/docs/images/dashboard/discovery_dashboard.png b/docs/images/dashboard/discovery_dashboard.png
new file mode 100644
index 000000000..bf9b735da
Binary files /dev/null and b/docs/images/dashboard/discovery_dashboard.png differ
diff --git a/docs/images/sc4snmp_architecture.png b/docs/images/sc4snmp_architecture.png
index a79189d28..56b7e9ebf 100644
Binary files a/docs/images/sc4snmp_architecture.png and b/docs/images/sc4snmp_architecture.png differ
diff --git a/docs/microk8s/configuration/deployment-configuration.md b/docs/microk8s/configuration/deployment-configuration.md
index b04de7d6e..f6d123b15 100644
--- a/docs/microk8s/configuration/deployment-configuration.md
+++ b/docs/microk8s/configuration/deployment-configuration.md
@@ -11,6 +11,19 @@ microk8s helm3 inspect values splunk-connect-for-snmp/splunk-connect-for-snmp >
 
 The whole file is divided into the following parts:
 
+To configure `deploymentUser`:
+
+- The `deploymentUser` configuration is kept in the `values.yaml` file as a global (top-level) parameter.
+- `values.yaml` is used during the installation process for configuring Kubernetes values.
+- This parameter defines the user name set in the deployment environment. The default value is `sc4snmp`.
+
+See the following `deploymentUser` example configuration:
+
+```yaml
+deploymentUser: "user1"
+```
+
 To configure the endpoint for sending SNMP data:
 
 - `splunk` - in case you use Splunk Enterprise/Cloud.
@@ -25,6 +38,10 @@ For traps receiving purposes:
 
 - `traps` - For more details see [trap configuration](trap-configuration.md).
 
+For SNMP device discovery purposes:
+
+- `discovery` - For more details see [Discovery configuration](discovery-configuration.md).
+
 Shared components:
 
 - `inventory` - For more details see [inventory configuration](../poller-configuration#configure-inventory).
diff --git a/docs/microk8s/configuration/discovery-configuration.md b/docs/microk8s/configuration/discovery-configuration.md
new file mode 100644
index 000000000..3c52d3635
--- /dev/null
+++ b/docs/microk8s/configuration/discovery-configuration.md
@@ -0,0 +1,155 @@
+# Discovery Configuration
+
+The discovery feature automatically discovers SNMP-enabled devices within a given subnet. Based on the discovery results, a `discovery_devices.csv` file is generated that can be used to configure polling.
+
+Discovery supports IPv4 and IPv6 subnets, SNMP v1, v2c, and v3 devices, and basic grouping of devices using the SNMP `sysDescr` from `SNMPv2-MIB` (OID `1.3.6.1.2.1.1.1.0`).
+
+
+### Discovery configuration file
+
+The discovery configuration is kept in the `values.yaml` file in the `discovery` section.
+`values.yaml` is used during the installation process for configuring Kubernetes values.
+
+See the following discovery example configuration:
+```yaml
+discovery:
+  enabled: true
+  logLevel: "DEBUG"
+  ipv6Enabled: true
+  discoveryPath: "/home/user/sc4snmp"
+  usernameSecrets:
+    - sc4snmp-hlab-sha-aes
+
+  autodiscovery:
+    discovery_version2c:
+      frequency: 86400
+      delete_already_discovered: true
+      network_address: 10.202.4.200/30
+      version: "2c"
+      community: "public"
+      port: 161
+      device_rules:
+        - name: "Linux servers"
+          patterns: "*linux*"
+          group: "linux-group"
+
+    discovery_version3:
+      frequency: 43200
+      delete_already_discovered: false
+      network_address: 10.202.4.200/30
+      version: "3"
+      port: 161
+      secret: sc4snmp-hlab-sha-aes
+      security_engine: "80001f8880e761866965756b6800000000"
+      device_rules:
+        - name: "Windows servers"
+          patterns: "*Windows*"
+          group: "windows-group"
+
+```
+
+### Enable Discovery
+To enable or disable the discovery feature, set the `enabled` key.
+The default value is `false`.
+
+### Define log level
+The log level for discovery can be set by changing the value for the `logLevel` key. The allowed values are `DEBUG`, `INFO`, `WARNING`, or `ERROR`.
+The default value is `INFO`.
+
+### Enable IPv6
+To enable IPv6 subnet scanning, set the `ipv6Enabled` key to `true`.
+
+!!! info
+    If `ipv6Enabled` is `false`, then no task will be created for a discovery key with an IPv6 network address.
+
+### Define Discovery Path
+`discoveryPath` specifies the absolute path on the local file system where the `discovery_devices.csv` file will be created.
+If the CSV file is not already present, a new file will be created.
+
+!!! info
+    The path provided should have read-write permission for user and group `10001`.
+
+### Define usernameSecrets
+The `usernameSecrets` key in the `discovery` section defines the SNMPv3 secrets used for discovering SNMP devices.
+`usernameSecrets` defines which Kubernetes "Secret" objects should be used; as values, it takes the names of the "Secret" objects.
+For more information on how to define the "Secret" object for SNMPv3, see [SNMPv3 Configuration](snmpv3-configuration.md).
+
+See the following example:
+```yaml
+discovery:
+  usernameSecrets:
+    - sc4snmp-homesecure-sha-aes
+    - sc4snmp-homesecure-sha-des
+```
+
+### Configure discovery tasks
+Discovery tasks are defined under the `autodiscovery` section. Each discovery task can target a specific subnet with its own SNMP version and settings.
+The discovery key (i.e., the task name) must start with a letter, not a number.
+
+Each task has the following fields to configure:
+
+| Field | Description | Default | Required |
+|-----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|----------|
+| `frequency` | Time interval (in seconds) between each run of the discovery task. Note: If the frequency is less than 6 hours, it will be treated as 6 hours by default. | `86400` | NO |
+| `delete_already_discovered` | Deletes old entries of a particular discovery key before writing new ones. | `false` | NO |
+| `network_address` | Subnet in CIDR notation to scan. Supports IPv4 or IPv6. | | YES |
+| `port` | SNMP listening port. | `161` | NO |
+| `version` | SNMP version, the allowed values are `1`, `2c`, or `3`. | `2c` | NO |
+| `community` | SNMP community string; required when `version` is `1` or `2c`. | | NO |
+| `secret` | The reference to the secret from `discovery.usernameSecrets` used for discovery; required when `version` is `3`. | | NO |
+| `security_engine` | The security engine ID required by SNMPv3. If it is not provided for version `3`, it will be autogenerated. | | NO |
+
+
+### Define delete_already_discovered
+The `delete_already_discovered` flag controls whether devices found in previous discovery runs are kept.
+
+Since the discovery task runs at fixed intervals to scan for SNMP-enabled devices:
+
+- If set to `true`, all devices discovered in the previous run under the same discovery key will be deleted. This is useful when you want to ensure that the list always reflects the most up-to-date set of devices.
+- If set to `false`, devices discovered in earlier runs are retained, and new devices are appended to the existing list. This is useful when you want to keep a cumulative list of all SNMP-enabled devices discovered over time.
+
+### Define device_rules
+The `device_rules` section is used to organize discovered devices into logical groups based on pattern matching against their SNMP system descriptions (`sysDescr`).
+
+Each rule consists of:
+
+- `name`: A label to identify the rule. It is used for reference and should be unique within the list.
+- `patterns`: A wildcard pattern (supports `*`) that matches against the `sysDescr` returned from SNMP.
+- `group`: The name of the group to assign the matched devices to. This group can later be referenced for polling or other configurations.
+
+**Example**
+```yaml
+device_rules:
+  - name: "Linux Devices"
+    patterns: "*Linux*"
+    group: "linux-group"
+```
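+
+To illustrate the matching logic only: the sketch below applies such a rule with Python's standard `fnmatch` module. The rule and the sample `sysDescr` strings are made up, and the actual SC4SNMP implementation may differ in details such as case sensitivity:
+
+```python
+from fnmatch import fnmatch
+
+# A hypothetical rule and some example sysDescr values.
+rule = {"name": "Linux Devices", "patterns": "*Linux*", "group": "linux-group"}
+sys_descrs = [
+    "Linux sc4snmp-host 5.15.0-86-generic x86_64",
+    "Cisco IOS Software, C2960 Software",
+]
+
+for descr in sys_descrs:
+    if fnmatch(descr, rule["patterns"]):
+        # A matched device would be recorded in the CSV under this group.
+        print(rule["group"], "<-", descr)
+```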
+| `secret` | The secret from `discovery.usernameSecrets` that should be used for discovery; required when `version` is `3`. | | NO |
+| `security_engine` | The security engine ID required by SNMPv3. If it is not provided for version `3`, it will be autogenerated. | | NO |
+
+### Define delete_already_discovered
+The `delete_already_discovered` flag controls whether devices found in previous discovery runs are kept.
+
+The discovery task runs at fixed intervals to scan for SNMP-enabled devices:
+  - If set to `true`, all devices discovered in the previous run under the same discovery key will be deleted. This is useful when you want to ensure that the list always reflects the most up-to-date set of devices.
+  - If set to `false`, devices discovered in earlier runs are retained, and new devices are appended to the existing list. This is useful when you want to keep a cumulative list of all SNMP-enabled devices discovered over time.
+
+### Define device_rules
+The `device_rules` section is used to organize discovered devices into logical groups based on pattern matching against their SNMP system descriptions (`sysDescr`).
+
+Each rule consists of:
+
+- `name`: A label to identify the rule. It is used for reference and should be unique within the list.
+- `patterns`: A wildcard pattern (supports `*`) that matches against the `sysDescr` returned from SNMP.
+- `group`: The name of the group to assign the matched devices to. This group can later be referenced for polling or other configurations.
+
+**Example**
+```yaml
+device_rules:
+  - name: "Linux Devices"
+    patterns: "*Linux*"
+    group: "linux-group"
+```
+
+### Configure Timeouts and Retries
+
+**Example**
+```yaml
+worker:
+  taskTimeout: 8000
+  udpConnectionTimeout: 3
+  udpConnectionRetries: 5
+```
+
+The following fields help control how long discovery tasks run and how SNMP responses are handled, especially for slower networks or larger subnets:
+
+#### `taskTimeout`
+
+Defines the **maximum execution time (in seconds)** for a single discovery task.
+- Default: `2400` seconds.
+- Increase this if you are scanning large subnets or using longer SNMP retry configurations.
+
+Make sure `taskTimeout` is large enough to accommodate the `nmap` scan and the SNMP checks across all IPs.
+
+#### `udpConnectionTimeout`
+
+Specifies the **timeout (in seconds)** for each SNMP request (`getCmd`).
+Increase this if devices take longer to respond or if there is network latency.
+
+#### `udpConnectionRetries`
+
+Determines how many times a request is retried if there is no response.
+Higher retries can improve success rates on unstable networks, but will increase the total execution time.
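+
+As a rough sanity check before settling on these values, you can estimate the worst-case scan duration. The snippet below is a back-of-the-envelope sketch only, not SC4SNMP's actual scheduling logic: it assumes every host in the subnet stays silent and costs `udpConnectionTimeout * udpConnectionRetries` seconds, which matches the arithmetic used in the troubleshooting guide; the real scan runs `nmap` first and probes hosts concurrently, so actual times are usually lower.
+
+```python
+# Hypothetical helper, not part of SC4SNMP: estimates the worst-case
+# time to probe every host in a subnet sequentially.
+import ipaddress
+
+
+def worst_case_seconds(cidr: str, timeout: int = 3, retries: int = 5) -> int:
+    # Number of usable host addresses in the subnet.
+    hosts = sum(1 for _ in ipaddress.ip_network(cidr, strict=False).hosts())
+    # Each silent host waits roughly timeout seconds per retry.
+    return hosts * timeout * retries
+
+
+print(worst_case_seconds("10.202.4.0/24"))             # 254 * 3 * 5 = 3810
+print(worst_case_seconds("10.202.4.0/24", retries=2))  # 254 * 3 * 2 = 1524
+```
+
+Under these assumptions, a silent `/24` scanned with the defaults could need up to 3810 seconds, which already exceeds the default `taskTimeout` of `2400` seconds; either lower the retries or raise `taskTimeout` accordingly.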
diff --git a/docs/microk8s/configuration/values-params-description.md b/docs/microk8s/configuration/values-params-description.md
index 0fa4f0a74..aee1163b5 100644
--- a/docs/microk8s/configuration/values-params-description.md
+++ b/docs/microk8s/configuration/values-params-description.md
@@ -120,7 +120,8 @@ Detailed documentation about configuring worker can be found in [Worker](worker-
 | `poller` | Section with configuration for worker poller pods | |
 | `trap` | Section with configuration for worker trap pods | |
 | `sender` | Section with configuration for worker sender pods | |
-| `x.replicaCount` | Number of pod replicas when autoscaling is disabled | poller/trap - `2`, sender - `1` |
+| `discovery` | Section with configuration for worker discovery pods | |
+| `x.replicaCount` | Number of pod replicas when autoscaling is disabled | poller/trap - `2`, discovery/sender - `1` |
 | `x.concurrency` | Minimum number of threads in a pod | `4` |
 | `x.prefetch` | Number of tasks consumed from the queue at once | poller - `1`, traps/sender - `30` |
 | `x.autoscaling.enabled` | Enables autoscaling for pod | poller - `false` |
@@ -143,6 +144,7 @@ Detailed documentation about configuring worker can be found in [Worker](worker-
 | `profilesReloadDelay` | Delay of polling profiles after inventory reload | `60` |
 | `logLevel` | Log level for workers | `INFO` |
 | `udpConnectionTimeout` | Timeout for SNMP operations in seconds | `3` |
+| `udpConnectionRetries` | Number of retries for SNMP operations | `5` |
 | `ignoreEmptyVarbinds` | Ignores "Empty SNMP response message" in responses | `false` |
 | `podAntiAffinity` | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) | `soft` |
 | `nodeSelector` | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) | |
@@ -194,6 +196,29 @@ Detailed documentation about configuring traps can be found in [Traps](trap-conf
 | `nodeSelector` | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) | |
 | `tolerations` | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) | |
 
+## Discovery
+
+Detailed documentation about configuring discovery can be found in [Discovery](discovery-configuration.md).
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `enabled` | Enables discovering SNMP-enabled devices and creates a CSV file. | `false` |
+| `usernameSecrets` | Defines SNMPv3 secrets used for device discovery | |
+| `ipv6Enabled` | Enables device detection using IPv6 subnets | `false` |
+| `logLevel` | Log level for a discovery pod | `INFO` |
+| `discoveryPath` | Absolute path where `discovery_devices.csv` will be stored | |
+| `autodiscovery.x.frequency` | Time interval (in minutes) between each run of the discovery task | `86400` |
+| `autodiscovery.x.delete_already_discovered` | Deletes old entries of a particular discovery key before writing new ones. | `false` |
+| `autodiscovery.x.network_address` | Subnet in CIDR notation to scan. | |
+| `autodiscovery.x.version` | SNMP version to use | `2c` |
+| `autodiscovery.x.community` | SNMP community string | |
+| `autodiscovery.x.port` | SNMP port to use | `161` |
+| `autodiscovery.x.secret` | Name of an existing Kubernetes Secret | |
+| `autodiscovery.x.security_engine` | SNMP engine ID | |
+| `autodiscovery.x.device_rules.name` | Device rule name for reference | |
+| `autodiscovery.x.device_rules.patterns` | Wildcard pattern to match SNMP sysDescr | |
+| `autodiscovery.x.device_rules.group` | Group name to assign matched devices | |
+
 ## serviceAccount
 
 | Variable | Description | Default |
diff --git a/docs/microk8s/configuration/worker-configuration.md b/docs/microk8s/configuration/worker-configuration.md
index 7e98419c7..cca2ddf21 100644
--- a/docs/microk8s/configuration/worker-configuration.md
+++ b/docs/microk8s/configuration/worker-configuration.md
@@ -12,6 +12,10 @@ SC4SNMP has two base functionalities: monitoring traps and polling. These operat
 3. The `sender` worker handles sending data to Splunk. You need to always have at least one sender pod running.
 
+SC4SNMP also provides discovery functionality, which is handled by the worker below:
+
+1. The `discovery` worker consumes all the tasks related to discovery.
+
 ### Worker configuration file
 Worker configuration is kept in the `values.yaml` file in the `worker` section. `worker` has 3 subsections: `poller`, `sender`, and `trap`, that refer to the workers types.
@@ -102,6 +106,30 @@ worker:
       # the resources requests for sender worker container
       requests:
         cpu: 250m
+  # The discovery worker handles auto-discovery of SNMP-enabled devices and creates a CSV file for it
+  discovery:
+    # number of the discovery replicas when autoscaling is set to false
+    replicaCount: 1
+    # minimum number of threads in a pod
+    concurrency: 4
+    # how many tasks are consumed from the queue at once
+    prefetch: 30
+    autoscaling:
+      # enabling autoscaling for discovery worker pods
+      enabled: false
+      # minimum number of running discovery worker pods when autoscaling is enabled
+      minReplicas: 2
+      # maximum number of running discovery worker pods when autoscaling is enabled
+      maxReplicas: 10
+      # CPU % threshold that must be exceeded on discovery worker pods to spawn another replica
+      targetCPUUtilizationPercentage: 80
+    resources:
+      # the resources limits for discovery worker container
+      limits:
+        cpu: 500m
+      # the resources requests for discovery worker container
+      requests:
+        cpu: 250m
 # Liveness probes are used in Kubernetes to know when a pod is alive or dead.
 # A pod can be in a dead state for a number of reasons;
 # the application could be crashed, some error in the application etc.
@@ -154,6 +182,8 @@ worker:
   podAntiAffinity: soft
   # udpConnectionTimeout timeout in seconds for SNMP operations
   udpConnectionTimeout: 3
+  # udpConnectionRetries number of retries for SNMP operations
+  udpConnectionRetries: 5
   # in case of seeing "Empty SNMP response message" this variable can be set to true
   ignoreEmptyVarbinds: false
@@ -184,6 +214,8 @@ worker:
     replicaCount: 1
   poller:
     replicaCount: 0
+  discovery:
+    replicaCount: 0
   logLevel: "WARNING"
 ```
@@ -205,6 +237,8 @@ worker:
       targetCPUUtilizationPercentage: 80
   poller:
     replicaCount: 0
+  discovery:
+    replicaCount: 0
   logLevel: "WARNING"
 ```
@@ -255,6 +289,8 @@ worker:
       minReplicas: 2
       maxReplicas: 20
       targetCPUUtilizationPercentage: 80
+  discovery:
+    replicaCount: 0
   logLevel: "WARNING"
 ```
@@ -325,52 +361,62 @@ Trap worker uses in memory cache to store the results of the reverse dns lookup.
### Worker parameters -| Variable | Description | Default | -|----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|-------------------| -| worker.poller.replicaCount | Number of poller worker replicas | 2 | -| worker.poller.concurrency | Minimum number of threads in a poller worker pod | 4 | -| worker.poller.prefetch | Number of tasks consumed from the queue at once | 1 | -| worker.poller.autoscaling.enabled | Enabling autoscaling for poller worker pods | false | -| worker.poller.autoscaling.minReplicas | Minimum number of running poller worker pods when autoscaling is enabled | 2 | -| worker.poller.autoscaling.maxReplicas | Maximum number of running poller worker pods when autoscaling is enabled | 10 | -| worker.poller.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on poller worker pods to spawn another replica | 80 | -| worker.poller.resources.limits | The resources limits for poller worker container | cpu: 500m | -| worker.poller.resources.requests | The requested resources for poller worker container | cpu: 250m | -| worker.trap.replicaCount | Number of trap worker replicas | 2 | -| worker.trap.concurrency | Minimum number of threads in a trap worker pod | 4 | -| worker.trap.prefetch | Number of tasks consumed from the queue at once | 30 | -| worker.trap.resolveAddress.enabled | Enable reverse dns lookup of the IP address of the processed trap | false | -| worker.trap.resolveAddress.cacheSize | Maximum number of reverse dns lookup result records stored in cache | 500 | -| worker.trap.resolveAddress.cacheTTL | Time to live of the cached reverse dns lookup record in seconds | 1800 | -| worker.trap.autoscaling.enabled | Enabling autoscaling for trap worker pods | false | -| worker.trap.autoscaling.minReplicas | Minimum number of running trap worker pods when autoscaling is enabled | 2 | -| worker.trap.autoscaling.maxReplicas | Maximum number of running trap worker pods when autoscaling is enabled | 10 | -| worker.trap.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on trap worker pods to spawn another replica | 80 | -| worker.trap.resources.limits | The resource limit for the poller worker container | cpu: 500m | -| worker.trap.resources.requests | The requested resources for the poller worker container | cpu: 250m | -| worker.sender.replicaCount | The number of sender worker replicas | 1 | -| worker.sender.concurrency | Minimum number of threads in a sender worker pod | 4 | -| worker.sender.prefetch | Number of tasks consumed from the queue at once | 30 | -| worker.sender.autoscaling.enabled | Enabling autoscaling for sender worker pods | false | -| worker.sender.autoscaling.minReplicas | Minimum number of running sender worker pods when autoscaling is enabled | 2 | -| worker.sender.autoscaling.maxReplicas | Maximum number of running sender worker pods when autoscaling is enabled | 10 | -| worker.sender.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on sender worker pods to spawn another replica | 80 | -| worker.sender.resources.limits | The resource limit for the poller worker container | cpu: 500m | -| worker.sender.resources.requests | The requested resources for the poller worker container | cpu: 250m | -| worker.livenessProbe.enabled | Whether the liveness probe is enabled | false | -| worker.livenessProbe.exec.command | The exec command for 
the liveness probe to run in the container | Check values.yaml |
-| worker.livenessProbe.initialDelaySeconds | Number of seconds after the container has started before liveness probe is initiated | 80 |
-| worker.livenessProbe.periodSeconds | Frequency of performing the probe in seconds | 10 |
-| worker.readinessProbe.enabled | Whether the readiness probe should be turned on or not | false |
-| worker.readinessProbe.exec.command | The exec command for the readiness probe to run in the container | Check values.yaml |
-| worker.readinessProbe.initialDelaySeconds | Number of seconds after the container has started before readiness probe is initiated | 30 |
-| worker.readinessProbe.periodSeconds | Frequency of performing the probe in seconds | 5 |
-| worker.taskTimeout | Task timeout in seconds when process takes a long time | 2400 |
-| worker.walkRetryMaxInterval | Maximum time interval between walk attempts | 180 |
-| worker.walkMaxRetries | Maximum number of walk retries | 5 |
-| worker.ignoreNotIncreasingOid | Ignoring `occurred: OID not increasing` issues for hosts specified in the array | [] |
-| worker.logLevel | Logging level, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL | INFO |
+| Variable | Description | Default |
+|--------------------------------------------------------------|-----------------------------------------------------------------------------------------|-------------------|
+| worker.poller.replicaCount | Number of poller worker replicas | 2 |
+| worker.poller.concurrency | Minimum number of threads in a poller worker pod | 4 |
+| worker.poller.prefetch | Number of tasks consumed from the queue at once | 1 |
+| worker.poller.autoscaling.enabled | Enabling autoscaling for poller worker pods | false |
+| worker.poller.autoscaling.minReplicas | Minimum number of running poller worker pods when autoscaling is enabled | 2 |
+| worker.poller.autoscaling.maxReplicas | Maximum number of running poller worker pods when autoscaling is enabled | 10 |
+| worker.poller.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on poller worker pods to spawn another replica | 80 |
+| worker.poller.resources.limits | The resource limits for the poller worker container | cpu: 500m |
+| worker.poller.resources.requests | The requested resources for the poller worker container | cpu: 250m |
+| worker.trap.replicaCount | Number of trap worker replicas | 2 |
+| worker.trap.concurrency | Minimum number of threads in a trap worker pod | 4 |
+| worker.trap.prefetch | Number of tasks consumed from the queue at once | 30 |
+| worker.trap.resolveAddress.enabled | Enable reverse dns lookup of the IP address of the processed trap | false |
+| worker.trap.resolveAddress.cacheSize | Maximum number of reverse dns lookup result records stored in cache | 500 |
+| worker.trap.resolveAddress.cacheTTL | Time to live of the cached reverse dns lookup record in seconds | 1800 |
+| worker.trap.autoscaling.enabled | Enabling autoscaling for trap worker pods | false |
+| worker.trap.autoscaling.minReplicas | Minimum number of running trap worker pods when autoscaling is enabled | 2 |
+| worker.trap.autoscaling.maxReplicas | Maximum number of running trap worker pods when autoscaling is enabled | 10 |
+| worker.trap.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on trap worker pods to spawn another replica | 80 |
+| worker.trap.resources.limits | The resource limits for the trap worker container | cpu: 500m |
+| worker.trap.resources.requests | The requested resources for the trap worker container | cpu: 250m |
+| worker.sender.replicaCount | The number of sender worker replicas | 1 |
+| worker.sender.concurrency | Minimum number of threads in a sender worker pod | 4 |
+| worker.sender.prefetch | Number of tasks consumed from the queue at once | 30 |
+| worker.sender.autoscaling.enabled | Enabling autoscaling for sender worker pods | false |
+| worker.sender.autoscaling.minReplicas | Minimum number of running sender worker pods when autoscaling is enabled | 2 |
+| worker.sender.autoscaling.maxReplicas | Maximum number of running sender worker pods when autoscaling is enabled | 10 |
+| worker.sender.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on sender worker pods to spawn another replica | 80 |
+| worker.sender.resources.limits | The resource limits for the sender worker container | cpu: 500m |
+| worker.sender.resources.requests | The requested resources for the sender worker container | cpu: 250m |
+| worker.discovery.replicaCount | Number of discovery worker replicas | 1 |
+| worker.discovery.concurrency | Minimum number of threads in a discovery worker pod | 4 |
+| worker.discovery.prefetch | Number of tasks consumed from the queue at once | 30 |
+| worker.discovery.autoscaling.enabled | Enabling autoscaling for discovery worker pods | false |
+| worker.discovery.autoscaling.minReplicas | Minimum number of running discovery worker pods when autoscaling is enabled | 2 |
+| worker.discovery.autoscaling.maxReplicas | Maximum number of running discovery worker pods when autoscaling is enabled | 10 |
+| worker.discovery.autoscaling.targetCPUUtilizationPercentage | CPU % threshold that must be exceeded on discovery worker pods to spawn another replica | 80 |
+| worker.discovery.resources.limits | The resource limits for the discovery worker container | cpu: 500m |
+| worker.discovery.resources.requests | The requested resources for the discovery worker container | cpu: 250m |
+| worker.livenessProbe.enabled | Whether the liveness probe is enabled | false |
+| worker.livenessProbe.exec.command | The exec command for the liveness probe to run in the container | Check values.yaml |
+| worker.livenessProbe.initialDelaySeconds | Number of seconds after the container has started before liveness probe is initiated | 80 |
+| worker.livenessProbe.periodSeconds | Frequency of performing the probe in seconds | 10 |
+| worker.readinessProbe.enabled | Whether the readiness probe should be turned on or not | false |
+| worker.readinessProbe.exec.command | The exec command for the readiness probe to run in the container | Check values.yaml |
+| worker.readinessProbe.initialDelaySeconds | Number of seconds after the container has started before readiness probe is initiated | 30 |
+| worker.readinessProbe.periodSeconds | Frequency of performing the probe in seconds | 5 |
+| worker.taskTimeout | Task timeout in seconds when process takes a long time | 2400 |
+| worker.walkRetryMaxInterval | Maximum time interval between walk attempts | 180 |
+| worker.walkMaxRetries | Maximum number of walk retries | 5 |
+| worker.ignoreNotIncreasingOid | Ignoring `occurred: OID not increasing` issues for hosts specified in the array | [] |
+| worker.logLevel | Logging level, possible options: DEBUG, INFO, WARNING, ERROR, CRITICAL, or FATAL | INFO |
 | worker.disableMongoDebugLogging | Disable extensive MongoDB and pymongo debug logging on SC4SNMP worker pods | true |
-| worker.podAntiAffinity | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) | soft |
-| worker.udpConnectionTimeout | Timeout for SNMP operations in seconds | 3 |
-| worker.ignoreEmptyVarbinds | Ignores “Empty SNMP response message” in responses | false |
+| worker.podAntiAffinity | [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) | soft |
+| worker.udpConnectionTimeout | Timeout for SNMP operations in seconds | 3 |
+| worker.udpConnectionRetries | Number of retries for SNMP operations | 5 |
+| worker.ignoreEmptyVarbinds | Ignores “Empty SNMP response message” in responses | false |
diff --git a/docs/microk8s/enable-ipv6.md b/docs/microk8s/enable-ipv6.md
index fdf7d9eea..9784f03f5 100644
--- a/docs/microk8s/enable-ipv6.md
+++ b/docs/microk8s/enable-ipv6.md
@@ -64,6 +64,13 @@ traps:
   ipFamilies: ["IPv4", "IPv6"]
 ```
 
+To enable SNMP device detection for IPv4 and IPv6 subnets, you need to add the following configuration to the `values.yaml` file:
+
+```
+discovery:
+  ipv6Enabled: true
+```
+
 To configure poller to poll IPv4 and IPv6 addresses, you need to add the following configuration to the `values.yaml` file:
 ```
 poller:
diff --git a/docs/microk8s/mk8s/k8s-microk8s-scaling.md b/docs/microk8s/mk8s/k8s-microk8s-scaling.md
index 8efc7a478..4306e5134 100644
--- a/docs/microk8s/mk8s/k8s-microk8s-scaling.md
+++ b/docs/microk8s/mk8s/k8s-microk8s-scaling.md
@@ -80,6 +80,8 @@ worker:
     replicaCount: 4
   sender:
     replicaCount: 4
+  discovery:
+    replicaCount: 4
 ```
 
 3. Add `traps` replica count in `values.yaml`:
@@ -104,28 +106,32 @@
 microk8s kubectl get pods -n sc4snmp
 ```
 
 You should get 4 replicas for each worker and traps service:
 
 ```bash
-NAME READY STATUS RESTARTS AGE
-snmp-mibserver-5df74fb678-zkj9m 1/1 Running 0 25h
-snmp-mongodb-6dc5c4f74d-xg6p7 2/2 Running 0 25h
-snmp-redis-master-0 1/1 Running 0 25h
-snmp-splunk-connect-for-snmp-inventory-k9t87 0/1 Completed 0 3m
-snmp-splunk-connect-for-snmp-scheduler-76848cf748-57qbx 1/1 Running 0 25h
-snmp-splunk-connect-for-snmp-trap-9f55664c4-9dv7d 1/1 Running 0 3m1s
-snmp-splunk-connect-for-snmp-trap-9f55664c4-crgld 1/1 Running 0 3m1s
-snmp-splunk-connect-for-snmp-trap-9f55664c4-sb768 1/1 Running 0 25h
-snmp-splunk-connect-for-snmp-trap-9f55664c4-tkhcp 1/1 Running 0 3m1s
-snmp-splunk-connect-for-snmp-worker-poller-7487956697-4hvpl 1/1 Running 0 21h
-snmp-splunk-connect-for-snmp-worker-poller-7487956697-8bvnn 1/1 Running 0 3m1s
-snmp-splunk-connect-for-snmp-worker-poller-7487956697-9dfgt 1/1 Running 0 3m1s
-snmp-splunk-connect-for-snmp-worker-poller-7487956697-hlhvz 1/1 Running 0 24h
-snmp-splunk-connect-for-snmp-worker-sender-657589666f-979d2 1/1 Running 0 3m1s
-snmp-splunk-connect-for-snmp-worker-sender-657589666f-mrvg9 1/1 Running 0 3m1s
-snmp-splunk-connect-for-snmp-worker-sender-657589666f-qtcr8 1/1 Running 0 21h
-snmp-splunk-connect-for-snmp-worker-sender-657589666f-tc8sv 1/1 Running 0 24h
-snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-6fbs2 1/1 Running 0 24h
-snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-kdcdb 1/1 Running 0 3m1s
-snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-sfxvb 1/1 Running 0 3m
-snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-xmmwv 1/1 Running 0 21h
+NAME READY STATUS RESTARTS AGE
+snmp-mibserver-5df74fb678-zkj9m 1/1 Running 0 25h
+snmp-mongodb-6dc5c4f74d-xg6p7 2/2 Running 0 25h
+snmp-redis-master-0 1/1 Running 0 25h
+snmp-splunk-connect-for-snmp-inventory-k9t87 0/1
Completed 0 3m +snmp-splunk-connect-for-snmp-scheduler-76848cf748-57qbx 1/1 Running 0 25h +snmp-splunk-connect-for-snmp-trap-9f55664c4-9dv7d 1/1 Running 0 3m1s +snmp-splunk-connect-for-snmp-trap-9f55664c4-crgld 1/1 Running 0 3m1s +snmp-splunk-connect-for-snmp-trap-9f55664c4-sb768 1/1 Running 0 25h +snmp-splunk-connect-for-snmp-trap-9f55664c4-tkhcp 1/1 Running 0 3m1s +snmp-splunk-connect-for-snmp-worker-poller-7487956697-4hvpl 1/1 Running 0 21h +snmp-splunk-connect-for-snmp-worker-poller-7487956697-8bvnn 1/1 Running 0 3m1s +snmp-splunk-connect-for-snmp-worker-poller-7487956697-9dfgt 1/1 Running 0 3m1s +snmp-splunk-connect-for-snmp-worker-poller-7487956697-hlhvz 1/1 Running 0 24h +snmp-splunk-connect-for-snmp-worker-sender-657589666f-979d2 1/1 Running 0 3m1s +snmp-splunk-connect-for-snmp-worker-sender-657589666f-mrvg9 1/1 Running 0 3m1s +snmp-splunk-connect-for-snmp-worker-sender-657589666f-qtcr8 1/1 Running 0 21h +snmp-splunk-connect-for-snmp-worker-sender-657589666f-tc8sv 1/1 Running 0 24h +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-js474 1/1 Running 0 3m1s +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-j423f 1/1 Running 0 3m1s +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-de45b 1/1 Running 0 21h +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-8fde5 1/1 Running 0 24h +snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-6fbs2 1/1 Running 0 24h +snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-kdcdb 1/1 Running 0 3m1s +snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-sfxvb 1/1 Running 0 3m +snmp-splunk-connect-for-snmp-worker-trap-859dc47d9b-xmmwv 1/1 Running 0 21h ``` ## Autoscaling SC4SNMP @@ -151,6 +157,11 @@ worker: enabled: true minReplicas: 5 maxReplicas: 10 + discovery: + autoscaling: + enabled: true + minReplicas: 5 + maxReplicas: 10 traps: autoscaling: @@ -174,30 +185,35 @@ microk8s kubectl get po -n sc4snmp After applying the changes, each worker and trap service will have from 5 to 10 instances: ```bash -NAME READY STATUS RESTARTS AGE -snmp-mibserver-6fdcdf9ddd-7bvmj 1/1 Running 0 25h -snmp-mongodb-6dc5c4f74d-6b7mf 2/2 Running 0 25h -snmp-redis-master-0 1/1 Running 0 25h -snmp-splunk-connect-for-snmp-inventory-sssgs 0/1 Completed 0 3m37s -snmp-splunk-connect-for-snmp-scheduler-5fcb6dcb44-r79ff 1/1 Running 0 25h -snmp-splunk-connect-for-snmp-trap-5788bc498c-62xsq 1/1 Running 0 2m10s -snmp-splunk-connect-for-snmp-trap-5788bc498c-bmlhg 1/1 Running 0 2m10s -snmp-splunk-connect-for-snmp-trap-5788bc498c-p7mkq 1/1 Running 0 2m10s -snmp-splunk-connect-for-snmp-trap-5788bc498c-t8q9c 1/1 Running 0 2m10s -snmp-splunk-connect-for-snmp-trap-5788bc498c-xjjp2 1/1 Running 0 24h -snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-25tbf 1/1 Running 0 16m -snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-dc6zr 1/1 Running 0 16m -snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-g7vpr 1/1 Running 0 16m -snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-gdkgq 1/1 Running 0 16m -snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-pg6cj 1/1 Running 0 24h -snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-56h9w 1/1 Running 0 24h -snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-hr54w 1/1 Running 0 16m -snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-j7wcn 1/1 Running 0 16m -snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-sgsdg 0/1 Pending 0 16m -snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-xrpfx 1/1 Running 0 16m -snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-79x2l 0/1 Pending 0 16m 
-snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-br7pf 1/1 Running 0 24h -snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-cnmh9 0/1 Pending 0 16m -snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-dhdgg 1/1 Running 0 16m -snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-wcwq5 0/1 Pending 0 16m +NAME READY STATUS RESTARTS AGE +snmp-mibserver-6fdcdf9ddd-7bvmj 1/1 Running 0 25h +snmp-mongodb-6dc5c4f74d-6b7mf 2/2 Running 0 25h +snmp-redis-master-0 1/1 Running 0 25h +snmp-splunk-connect-for-snmp-inventory-sssgs 0/1 Completed 0 3m37s +snmp-splunk-connect-for-snmp-scheduler-5fcb6dcb44-r79ff 1/1 Running 0 25h +snmp-splunk-connect-for-snmp-trap-5788bc498c-62xsq 1/1 Running 0 2m10s +snmp-splunk-connect-for-snmp-trap-5788bc498c-bmlhg 1/1 Running 0 2m10s +snmp-splunk-connect-for-snmp-trap-5788bc498c-p7mkq 1/1 Running 0 2m10s +snmp-splunk-connect-for-snmp-trap-5788bc498c-t8q9c 1/1 Running 0 2m10s +snmp-splunk-connect-for-snmp-trap-5788bc498c-xjjp2 1/1 Running 0 24h +snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-25tbf 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-dc6zr 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-g7vpr 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-gdkgq 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-poller-5d76b9b675-pg6cj 1/1 Running 0 24h +snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-56h9w 1/1 Running 0 24h +snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-hr54w 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-j7wcn 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-sgsdg 0/1 Pending 0 16m +snmp-splunk-connect-for-snmp-worker-sender-7757fb7f89-xrpfx 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-js474 1/1 Running 0 24h +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-bfgr4 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-gt4rf 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-ku76g 0/1 Pending 0 16m +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-a243g 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-79x2l 0/1 Pending 0 16m +snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-br7pf 1/1 Running 0 24h +snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-cnmh9 0/1 Pending 0 16m +snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-dhdgg 1/1 Running 0 16m +snmp-splunk-connect-for-snmp-worker-trap-6b8fd89868-wcwq5 0/1 Pending 0 16m ``` \ No newline at end of file diff --git a/docs/microk8s/sc4snmp-installation.md b/docs/microk8s/sc4snmp-installation.md index 97c9c8330..b45567f99 100644 --- a/docs/microk8s/sc4snmp-installation.md +++ b/docs/microk8s/sc4snmp-installation.md @@ -94,16 +94,17 @@ microk8s kubectl get pods -n sc4snmp Example output: ``` -NAME READY STATUS RESTARTS AGE -snmp-splunk-connect-for-snmp-scheduler-7ddbc8d75-bljsj 1/1 Running 0 133m -snmp-splunk-connect-for-snmp-worker-poller-57cd8f4665-9z9vx 1/1 Running 0 133m -snmp-splunk-connect-for-snmp-worker-sender-5c44cbb9c5-ppmb5 1/1 Running 0 133m -snmp-splunk-connect-for-snmp-worker-trap-549766d4-28qzh 1/1 Running 0 133m -snmp-mibserver-7f879c5b7c-hz9tz 1/1 Running 0 133m -snmp-mongodb-869cc8586f-vvr9f 2/2 Running 0 133m -snmp-redis-master-0 1/1 Running 0 133m -snmp-splunk-connect-for-snmp-trap-78759bfc8b-79m6d 1/1 Running 0 99m -snmp-splunk-connect-for-snmp-inventory-mjccw 0/1 Completed 0 6s +NAME READY STATUS RESTARTS AGE 
+snmp-splunk-connect-for-snmp-scheduler-7ddbc8d75-bljsj 1/1 Running 0 133m +snmp-splunk-connect-for-snmp-worker-poller-57cd8f4665-9z9vx 1/1 Running 0 133m +snmp-splunk-connect-for-snmp-worker-sender-5c44cbb9c5-ppmb5 1/1 Running 0 133m +snmp-splunk-connect-for-snmp-worker-trap-549766d4-28qzh 1/1 Running 0 133m +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-js474 1/1 Running 0 133m +snmp-mibserver-7f879c5b7c-hz9tz 1/1 Running 0 133m +snmp-mongodb-869cc8586f-vvr9f 2/2 Running 0 133m +snmp-redis-master-0 1/1 Running 0 133m +snmp-splunk-connect-for-snmp-trap-78759bfc8b-79m6d 1/1 Running 0 99m +snmp-splunk-connect-for-snmp-inventory-mjccw 0/1 Completed 0 6s ``` The output might vary depending on the configuration. In the above example, both polling and traps are configured, @@ -214,13 +215,14 @@ To uninstall SC4SNMP run the following commands: Example of pods terminating: ``` -NAME READY STATUS RESTARTS AGE -snmp-mibserver-bb8994c64-twk42 1/1 Terminating 2 (5h21m ago) 46h -snmp-splunk-connect-for-snmp-worker-sender-7f5557678b-psj97 1/1 Terminating 1 (5h21m ago) 22h -snmp-splunk-connect-for-snmp-worker-trap-dfcc487c-lh2dl 1/1 Terminating 1 (5h21m ago) 22h -snmp-splunk-connect-for-snmp-worker-trap-dfcc487c-5z5sq 1/1 Terminating 1 (5h21m ago) 22h -snmp-splunk-connect-for-snmp-trap-684d57dc8d-722tv 1/1 Terminating 1 (5h21m ago) 22h -snmp-splunk-connect-for-snmp-trap-684d57dc8d-z68lb 1/1 Terminating 1 (5h21m ago) 22h +NAME READY STATUS RESTARTS AGE +snmp-mibserver-bb8994c64-twk42 1/1 Terminating 2 (5h21m ago) 46h +snmp-splunk-connect-for-snmp-worker-sender-7f5557678b-psj97 1/1 Terminating 1 (5h21m ago) 22h +snmp-splunk-connect-for-snmp-worker-trap-dfcc487c-lh2dl 1/1 Terminating 1 (5h21m ago) 22h +snmp-splunk-connect-for-snmp-worker-discovery-7d9fdc5d56-js474 1/1 Terminating 1 (5h21m ago) 22h +snmp-splunk-connect-for-snmp-worker-trap-dfcc487c-5z5sq 1/1 Terminating 1 (5h21m ago) 22h +snmp-splunk-connect-for-snmp-trap-684d57dc8d-722tv 1/1 Terminating 1 (5h21m ago) 22h +snmp-splunk-connect-for-snmp-trap-684d57dc8d-z68lb 1/1 Terminating 1 (5h21m ago) 22h ``` ## Reinstall Splunk Connect for SNMP diff --git a/docs/troubleshooting/discovery-issues.md b/docs/troubleshooting/discovery-issues.md new file mode 100644 index 000000000..e135c3053 --- /dev/null +++ b/docs/troubleshooting/discovery-issues.md @@ -0,0 +1,70 @@ +# Troubleshooting Discovery Issues + + +## Permission denied while writing discovery file + +Discovery fails with a `PermissionError` related to `discovery_devices.csv`. In such cases, you may see the following error: + +```log +PermissionError: [Errno 13] Permission denied: '/app/discovery/discovery_devices.csv' +``` + +The folder specified in the `discoveryPath` value (which is mounted to `/app/discovery` inside the container) does not have the correct permissions for the application user (UID `10001`) to write files. + +Ensure that the folder specified in the `discoveryPath` has write permissions for UID `10001`. You can fix this by updating the folder ownership or permissions before starting the containers. + +**Example (on the host system):** +```bash +sudo chown 10001:10001 /your/local/folder/path +sudo chmod 755 /your/local/folder/path +``` + +## Discovery not completed within the time limit + +If the subnet being scanned has a large IP range (e.g., `/22`, `/21`, or bigger), the task may not be completed within the default time limit of **2400 seconds**. 
In such cases, you may see the following error:
+
+```log
+[2025-08-07 06:03:29,415: ERROR/MainProcess] Hard time limit (2400s) exceeded for splunk_connect_for_snmp.discovery.tasks.discovery
+```
+
+Increase the task timeout value using the `taskTimeout` field under the `worker` section in your `values.yaml`:
+
+```yaml
+worker:
+  taskTimeout: 3600 # Increase based on expected duration
+```
+
+## Discovery takes too much time
+
+Discovery tasks may take longer to complete due to unnecessary SNMP requests or long wait times when scanning large subnets. Below are a few ways to optimize performance:
+
+### Adjust Timeout and Retries
+
+If the subnet has very few SNMP-enabled devices, high timeout and retry values can significantly slow down the process.
+For example, with the default `udpConnectionTimeout` of `3` seconds and `udpConnectionRetries` of `5`, a non-SNMP-enabled device will take up to **15 seconds** before the scan moves on to the next IP.
+Consider lowering the retry parameters to speed up execution:
+
+```yaml
+worker:
+  udpConnectionTimeout: 3
+  udpConnectionRetries: 2
+```
+
+!!! info
+    Reduce these values carefully. Setting them too low may cause missed detections in slow or high-latency networks, which can impact data accuracy.
+
+## No Output in `discovery_devices.csv`
+
+After running a discovery task, no entries are written to the `discovery_devices.csv` file. The issue might have several root causes. Some of them are:
+
+- Wrong device IP or port provided.
+- Subnet contains no reachable or SNMP-enabled devices.
+- Nmap is unable to detect live hosts due to network or firewall restrictions.
+- For SNMPv2c: Incorrect community string.
+- For SNMPv3: Incorrect privacy key or authentication credentials.
+
+**Resolution:**
+
+- Double-check the IP range or subnet provided in the discovery config.
+- Validate that the target devices have SNMP enabled and are reachable from the container.
+- Verify SNMP credentials (community string or SNMPv3 credentials) for correctness, for example with a manual probe like the sketch below.
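+
+To narrow these down, it can help to probe a single address by hand, outside of the discovery task. The following is a minimal sketch only: it assumes the pysnmp 7.x asyncio API (the same API this repository's integration tests use), and the address, port, and community string are placeholders to replace with your own values.
+
+```python
+# probe.py - hypothetical one-off check, not part of SC4SNMP itself.
+import asyncio
+
+from pysnmp.hlapi.v3arch.asyncio import (
+    CommunityData,
+    ContextData,
+    ObjectIdentity,
+    ObjectType,
+    SnmpEngine,
+    UdpTransportTarget,
+    get_cmd,
+)
+
+
+async def probe(host: str, community: str = "public", port: int = 161):
+    # Ask for sysDescr (1.3.6.1.2.1.1.1.0), the same OID discovery uses
+    # for grouping devices; mpModel=1 selects SNMP v2c.
+    error_indication, error_status, _, var_binds = await get_cmd(
+        SnmpEngine(),
+        CommunityData(community, mpModel=1),
+        await UdpTransportTarget.create((host, port), timeout=3, retries=2),
+        ContextData(),
+        ObjectType(ObjectIdentity("1.3.6.1.2.1.1.1.0")),
+    )
+    if error_indication:
+        print(f"{host}: {error_indication}")
+    elif error_status:
+        print(f"{host}: {error_status.prettyPrint()}")
+    else:
+        for var_bind in var_binds:
+            print(f"{host}: " + " = ".join(x.prettyPrint() for x in var_bind))
+
+
+asyncio.run(probe("10.202.4.201"))
+```
+
+If the probe answers but discovery still writes nothing, re-check the discovery configuration; if it times out against a device that is known to be up, the problem is usually reachability or credentials rather than the discovery task itself.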
diff --git a/entrypoint.sh b/entrypoint.sh index 0af1c0839..0f7f80c94 100755 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -9,6 +9,10 @@ wait-for-dep ${REDIS_DEPENDENCIES} "${MONGO_URI}" "${MIB_INDEX}" case $1 in +discovery) + discovery-loader + ;; + inventory) inventory-loader ;; @@ -18,6 +22,9 @@ celery) beat) celery -A splunk_connect_for_snmp.poller beat -l "$LOG_LEVEL" --max-interval=10 ;; + worker-discovery) + celery -A splunk_connect_for_snmp.poller worker -l "$LOG_LEVEL" -Q discovery --autoscale=8,"$WORKER_CONCURRENCY" + ;; worker-trap) celery -A splunk_connect_for_snmp.poller worker -l "$LOG_LEVEL" -Q traps --autoscale=8,"$WORKER_CONCURRENCY" ;; diff --git a/examples/polling_and_traps_v3.yaml b/examples/polling_and_traps_v3.yaml index 9eb76e497..dac79f81f 100644 --- a/examples/polling_and_traps_v3.yaml +++ b/examples/polling_and_traps_v3.yaml @@ -7,7 +7,7 @@ splunk: port: "8088" traps: # Remember to create sc4snmp-homesecure-sha-aes and sc4snmp-homesecure-sha-des secrets beforehand - # this is how to do it: https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/microk8s/configuration/snmpv3-configuration/ + # this is how to do it: https://splunk.github.io/splunk-connect-for-snmp/main/microk8s/configuration/snmpv3-configuration/ usernameSecrets: - sc4snmp-homesecure-sha-aes - sc4snmp-homesecure-sha-des diff --git a/integration_tests/.env b/integration_tests/.env index fc2103e08..91337163e 100644 --- a/integration_tests/.env +++ b/integration_tests/.env @@ -5,6 +5,7 @@ SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH= TRAPS_CONFIG_FILE_ABSOLUTE_PATH= INVENTORY_FILE_ABSOLUTE_PATH= COREFILE_ABS_PATH= +DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH= COREDNS_ADDRESS=172.28.0.255 COREDNS_ADDRESS_IPv6=fd02:0:0:0:7fff:ffff:ffff:ffff SC4SNMP_VERSION=latest @@ -35,6 +36,11 @@ SPLUNK_HEC_PATH=/services/collector SPLUNK_AGGREGATE_TRAPS_EVENTS=false IGNORE_EMPTY_VARBINDS=false +# Discovery +DISCOVERY_ENABLE=true +DISCOVERY_LOG_LEVEL=INFO +DISCOVERY_PATH= + # Workers configration WALK_RETRY_MAX_INTERVAL=180 WALK_MAX_RETRIES=5 @@ -75,6 +81,15 @@ WORKER_TRAP_MEMORY_LIMIT=500M WORKER_TRAP_CPU_RESERVATIONS=0.5 WORKER_TRAP_MEMORY_RESERVATIONS=250M +# Worker Discovery +WORKER_DISCOVERY_CONCURRENCY=4 +PREFETCH_DISCOVERY_COUNT=30 +WORKER_DISCOVERY_REPLICAS=1 +WORKER_DISCOVERY_CPU_LIMIT=1 +WORKER_DISCOVERY_MEMORY_LIMIT=500M +WORKER_DISCOVERY_CPU_RESERVATIONS=0.5 +WORKER_DISCOVERY_MEMORY_RESERVATIONS=250M + # Inventory configuration INVENTORY_LOG_LEVEL=INFO CHAIN_OF_TASKS_EXPIRY_TIME=500 diff --git a/integration_tests/automatic_setup_compose.sh b/integration_tests/automatic_setup_compose.sh index 1579fa751..be207c9f5 100755 --- a/integration_tests/automatic_setup_compose.sh +++ b/integration_tests/automatic_setup_compose.sh @@ -44,7 +44,9 @@ deploy_poetry() { poetry install poetry add --group dev splunk-sdk poetry add --group dev splunklib - poetry add --group dev pysnmplib + poetry add --group dev pysnmp==7.1.8 + poetry add --group dev pytest-asyncio + poetry add --group dev pysnmpcrypto==0.0.4 } wait_for_containers_to_be_up() { @@ -104,15 +106,19 @@ cp ../docker_compose/* . 
SCHEDULER_CONFIG_FILE="scheduler-config.yaml"
 TRAPS_CONFIG_FILE="traps-config.yaml"
 INVENTORY_FILE="inventory-tests.csv"
+DISCOVERY_CONFIG_FILE="discovery-config.yaml"
 COREFILE="Corefile"
+DISCOVERY_FOLDER="discovery"
 
 # Get the absolute paths of the files
 SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH=$(realpath "$SCHEDULER_CONFIG_FILE")
 TRAPS_CONFIG_FILE_ABSOLUTE_PATH=$(realpath "$TRAPS_CONFIG_FILE")
+DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH=$(realpath "$DISCOVERY_CONFIG_FILE")
 INVENTORY_FILE_ABSOLUTE_PATH=$(realpath "$INVENTORY_FILE")
 COREFILE_ABS_PATH=$(realpath "$COREFILE")
 SPLUNK_HEC_HOST=$(hostname -I | cut -d " " -f1)
 SPLUNK_HEC_TOKEN=$(cat hec_token)
+DISCOVERY_PATH=$(realpath "$DISCOVERY_FOLDER")
 
 # Temporary file to store the updated .env content
 TEMP_ENV_FILE=".env.tmp"
@@ -120,16 +126,20 @@ TEMP_ENV_FILE=".env.tmp"
 # Update or add the variables in the .env file
 awk -v scheduler_path="$SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH" \
     -v traps_path="$TRAPS_CONFIG_FILE_ABSOLUTE_PATH" \
+    -v discovery_config_path="$DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH" \
     -v inventory_path="$INVENTORY_FILE_ABSOLUTE_PATH" \
     -v corefile_path="$COREFILE_ABS_PATH" \
+    -v discovery_path="$DISCOVERY_PATH" \
     -v splunk_hec_host="$SPLUNK_HEC_HOST" \
     -v splunk_hec_token="$SPLUNK_HEC_TOKEN" \
 '
 BEGIN {
     updated["SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH"] = 0;
     updated["TRAPS_CONFIG_FILE_ABSOLUTE_PATH"] = 0;
+    updated["DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH"] = 0;
     updated["INVENTORY_FILE_ABSOLUTE_PATH"] = 0;
     updated["COREFILE_ABS_PATH"] = 0;
+    updated["DISCOVERY_PATH"] = 0;
     updated["SPLUNK_HEC_HOST"] = 0;
     updated["SPLUNK_HEC_TOKEN"] = 0;
 }
@@ -140,12 +150,18 @@ awk -v scheduler_path="$SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH" \
     } else if ($1 == "TRAPS_CONFIG_FILE_ABSOLUTE_PATH=") {
         print "TRAPS_CONFIG_FILE_ABSOLUTE_PATH=" traps_path;
         updated["TRAPS_CONFIG_FILE_ABSOLUTE_PATH"] = 1;
+    } else if ($1 == "DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH=") {
+        print "DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH=" discovery_config_path;
+        updated["DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH"] = 1;
     } else if ($1 == "INVENTORY_FILE_ABSOLUTE_PATH=") {
         print "INVENTORY_FILE_ABSOLUTE_PATH=" inventory_path;
         updated["INVENTORY_FILE_ABSOLUTE_PATH"] = 1;
     } else if ($1 == "COREFILE_ABS_PATH=") {
         print "COREFILE_ABS_PATH=" corefile_path;
         updated["COREFILE_ABS_PATH"] = 1;
+    } else if ($1 == "DISCOVERY_PATH=") {
+        print "DISCOVERY_PATH=" discovery_path;
+        updated["DISCOVERY_PATH"] = 1;
     } else if ($1 == "SPLUNK_HEC_HOST=") {
         print "SPLUNK_HEC_HOST=" splunk_hec_host;
         updated["SPLUNK_HEC_HOST"] = 1;
@@ -163,12 +179,18 @@ awk -v scheduler_path="$SCHEDULER_CONFIG_FILE_ABSOLUTE_PATH" \
     if (updated["TRAPS_CONFIG_FILE_ABSOLUTE_PATH"] == 0) {
         print "TRAPS_CONFIG_FILE_ABSOLUTE_PATH=" traps_path;
     }
+    if (updated["DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH"] == 0) {
+        print "DISCOVERY_CONFIG_FILE_ABSOLUTE_PATH=" discovery_config_path;
+    }
     if (updated["INVENTORY_FILE_ABSOLUTE_PATH"] == 0) {
         print "INVENTORY_FILE_ABSOLUTE_PATH=" inventory_path;
     }
     if (updated["COREFILE_ABS_PATH"] == 0) {
         print "COREFILE_ABS_PATH=" corefile_path;
     }
+    if (updated["DISCOVERY_PATH"] == 0) {
+        print "DISCOVERY_PATH=" discovery_path;
+    }
     if (updated["SPLUNK_HEC_HOST"] == 0) {
         print "SPLUNK_HEC_HOST=" splunk_hec_host;
     }
diff --git a/integration_tests/automatic_setup_microk8s.sh b/integration_tests/automatic_setup_microk8s.sh
index 89718b8d7..4b79f7710 100755
--- a/integration_tests/automatic_setup_microk8s.sh
+++ b/integration_tests/automatic_setup_microk8s.sh
@@ -44,7 +44,9 @@ deploy_poetry() {
     poetry install
     poetry add --group dev splunk-sdk
poetry add --group dev splunklib - poetry add --group dev pysnmplib + poetry add --group dev pysnmp==7.1.8 + poetry add --group dev pytest-asyncio + poetry add --group dev pysnmpcrypto==0.0.4 } wait_for_pod_initialization() { diff --git a/integration_tests/deploy_and_test.sh b/integration_tests/deploy_and_test.sh index 258ce4437..d490d58b1 100755 --- a/integration_tests/deploy_and_test.sh +++ b/integration_tests/deploy_and_test.sh @@ -91,7 +91,9 @@ deploy_poetry() { poetry install poetry add -D splunk-sdk poetry add -D splunklib - poetry add -D pysnmplib + poetry add -D pysnmp==7.1.8 + poetry add -D pytest-asyncio + poetry add -D pysnmpcrypto==0.0.4 } run_integration_tests() { diff --git a/integration_tests/discovery-config.yaml b/integration_tests/discovery-config.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/integration_tests/pytest.ini b/integration_tests/pytest.ini index edbb83602..5b7af3697 100644 --- a/integration_tests/pytest.ini +++ b/integration_tests/pytest.ini @@ -1,4 +1,8 @@ [pytest] +asyncio_mode = strict +asyncio_debug = true +log_cli = true +log_cli_level = DEBUG markers = part1: marks tests as belonging to part 1 of a test suite part2: marks tests as belonging to part 2 of a test suite diff --git a/integration_tests/test_trap_integration.py b/integration_tests/test_trap_integration.py index 94a6b11e9..71202eb26 100644 --- a/integration_tests/test_trap_integration.py +++ b/integration_tests/test_trap_integration.py @@ -13,11 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # ######################################################################## +import asyncio import logging -import time import pytest -from pysnmp.hlapi import * +from pysnmp.hlapi.v3arch.asyncio import * from integration_tests.splunk_test_utils import ( create_v3_secrets_compose, @@ -34,58 +34,55 @@ logger = logging.getLogger(__name__) -def send_trap( +async def send_trap( host, port, object_identity, mib_to_load, community, mp_model, *var_binds ): - iterator = sendNotification( + error_indication, error_status, error_index, varBinds = await send_notification( SnmpEngine(), CommunityData(community, mpModel=mp_model), - UdpTransportTarget((host, port)), + await UdpTransportTarget.create((host, port)), ContextData(), "trap", NotificationType(ObjectIdentity(object_identity)) - .addVarBinds(*var_binds) - .loadMibs(mib_to_load), + .add_varbinds(*var_binds) + .load_mibs(mib_to_load), ) - error_indication, error_status, error_index, var_binds = next(iterator) - if error_indication: logger.error(f"{error_indication}") -def send_v3_trap(host, port, object_identity, *var_binds): - iterator = sendNotification( +async def send_v3_trap(host, port, object_identity, *var_binds): + error_indication, error_status, error_index, varBinds = await send_notification( SnmpEngine(OctetString(hexValue="80003a8c04")), UsmUserData( - "snmp-poller", - "PASSWORD1", - "PASSWORD1", + userName="snmp-poller", + authKey="PASSWORD1", + privKey="PASSWORD1", authProtocol=(1, 3, 6, 1, 6, 3, 10, 1, 1, 3), privProtocol=(1, 3, 6, 1, 6, 3, 10, 1, 2, 4), ), - UdpTransportTarget((host, port)), + await UdpTransportTarget.create((host, port)), ContextData(), "trap", - NotificationType(ObjectIdentity(object_identity)).addVarBinds(*var_binds), + NotificationType(ObjectIdentity(object_identity)).add_varbinds(*var_binds), ) - error_indication, error_status, error_index, var_binds = next(iterator) - if error_indication: logger.error(f"{error_indication}") @pytest.mark.part6 
-def test_trap_v1(request, setup_splunk): +@pytest.mark.asyncio +async def test_trap_v1(request, setup_splunk): trap_external_ip = request.config.getoption("trap_external_ip") logger.info(f"I have: {trap_external_ip}") - time.sleep(2) + await asyncio.sleep(2) # send trap varbind1 = ("1.3.6.1.6.3.1.1.4.3.0", "1.3.6.1.4.1.20408.4.1.1.2") varbind2 = ("1.3.6.1.2.1.1.4.0", OctetString("my contact")) - send_trap( + await send_trap( trap_external_ip, 162, "1.3.6.1.6.3.1.1.5.2", @@ -97,7 +94,7 @@ def test_trap_v1(request, setup_splunk): ) # wait for the message to be processed - time.sleep(5) + await asyncio.sleep(5) search_query = """search index="netops" sourcetype="sc4snmp:traps" earliest=-1m | head 1""" @@ -107,15 +104,16 @@ def test_trap_v1(request, setup_splunk): @pytest.mark.part6 -def test_trap_v2(request, setup_splunk): +@pytest.mark.asyncio +async def test_trap_v2(request, setup_splunk): trap_external_ip = request.config.getoption("trap_external_ip") logger.info(f"I have: {trap_external_ip}") - time.sleep(2) + await asyncio.sleep(2) # send trap varbind1 = ("1.3.6.1.6.3.1.1.4.3.0", "1.3.6.1.4.1.20408.4.1.1.2") varbind2 = ("1.3.6.1.2.1.1.1.0", OctetString("my system")) - send_trap( + await send_trap( trap_external_ip, 162, "1.3.6.1.6.3.1.1.5.2", @@ -127,7 +125,7 @@ def test_trap_v2(request, setup_splunk): ) # wait for the message to be processed - time.sleep(5) + await asyncio.sleep(5) search_query = """search index="netops" sourcetype="sc4snmp:traps" earliest=-1m | head 1""" @@ -137,19 +135,20 @@ def test_trap_v2(request, setup_splunk): @pytest.mark.part6 -def test_added_varbind(request, setup_splunk): +@pytest.mark.asyncio +async def test_added_varbind(request, setup_splunk): trap_external_ip = request.config.getoption("trap_external_ip") logger.info(f"I have: {trap_external_ip}") - time.sleep(2) + await asyncio.sleep(2) # send trap varbind1 = ("1.3.6.1.2.1.1.1.0", OctetString("test_added_varbind")) - send_trap( + await send_trap( trap_external_ip, 162, "1.3.6.1.2.1.2.1", "SNMPv2-MIB", "public", 1, varbind1 ) # wait for the message to be processed - time.sleep(5) + await asyncio.sleep(5) search_query = ( """search index="netops" "SNMPv2-MIB.sysDescr.value"="test_added_varbind" """ @@ -160,15 +159,16 @@ def test_added_varbind(request, setup_splunk): @pytest.mark.part6 -def test_many_traps(request, setup_splunk): +@pytest.mark.asyncio +async def test_many_traps(request, setup_splunk): trap_external_ip = request.config.getoption("trap_external_ip") logger.info(f"I have: {trap_external_ip}") - time.sleep(2) + await asyncio.sleep(2) # send trap varbind1 = ("1.3.6.1.2.1.1.1.0", OctetString("test_many_traps")) for _ in range(5): - send_trap( + await send_trap( trap_external_ip, 162, "1.3.6.1.2.1.2.1", @@ -179,7 +179,7 @@ def test_many_traps(request, setup_splunk): ) # wait for the message to be processed - time.sleep(2) + await asyncio.sleep(5) search_query = ( """search index="netops" "SNMPv2-MIB.sysDescr.value"="test_many_traps" """ @@ -191,15 +191,16 @@ def test_many_traps(request, setup_splunk): @pytest.mark.part6 -def test_more_than_one_varbind(request, setup_splunk): +@pytest.mark.asyncio +async def test_more_than_one_varbind(request, setup_splunk): trap_external_ip = request.config.getoption("trap_external_ip") logger.info(f"I have: {trap_external_ip}") - time.sleep(2) + await asyncio.sleep(2) # send trap varbind1 = ("1.3.6.1.2.1.1.4.0", OctetString("test_more_than_one_varbind_contact")) varbind2 = ("1.3.6.1.2.1.1.1.0", OctetString("test_more_than_one_varbind")) - send_trap( + await 
send_trap( trap_external_ip, 162, "1.3.6.1.2.1.2.1", @@ -211,7 +212,7 @@ def test_more_than_one_varbind(request, setup_splunk): ) # wait for the message to be processed - time.sleep(2) + await asyncio.sleep(2) search_query = """search index="netops" | search "SNMPv2-MIB.sysDescr.value"="test_more_than_one_varbind" "SNMPv2-MIB.sysContact.value"=test_more_than_one_varbind_contact """ @@ -222,14 +223,15 @@ def test_more_than_one_varbind(request, setup_splunk): @pytest.mark.part6 -def test_loading_mibs(request, setup_splunk): +@pytest.mark.asyncio +async def test_loading_mibs(request, setup_splunk): trap_external_ip = request.config.getoption("trap_external_ip") logger.info(f"I have: {trap_external_ip}") - time.sleep(2) + await asyncio.sleep(2) # send trap varbind1 = ("1.3.6.1.6.3.1.1.4.1.0", "1.3.6.1.4.1.15597.1.1.1.1.0.1") - send_trap( + await send_trap( trap_external_ip, 162, "1.3.6.1.4.1.15597.1.1.1.1", @@ -240,7 +242,7 @@ def test_loading_mibs(request, setup_splunk): ) # wait for the message to be processed - time.sleep(2) + await asyncio.sleep(2) search_query = """search index=netops "SNMPv2-MIB.snmpTrapOID.value"="AVAMAR-MCS-MIB::eventTrap" """ @@ -250,7 +252,8 @@ def test_loading_mibs(request, setup_splunk): @pytest.mark.part6 -def test_trap_v3(request, setup_splunk): +@pytest.mark.asyncio +async def test_trap_v3(request, setup_splunk): trap_external_ip = request.config.getoption("trap_external_ip") deployment = request.config.getoption("sc4snmp_deployment") if deployment == "microk8s": @@ -266,13 +269,13 @@ def test_trap_v3(request, setup_splunk): wait_for_pod_initialization_microk8s() else: wait_for_containers_initialization() - time.sleep(15) + await asyncio.sleep(20) # send trap varbind1 = ("1.3.6.1.2.1.1.4.0", OctetString("test_trap_v3")) - send_v3_trap(trap_external_ip, 162, "1.3.6.1.2.1.1.0", varbind1) + await send_v3_trap(trap_external_ip, 162, "1.3.6.1.2.1.1.0", varbind1) # wait for the message to be processed - time.sleep(2) + await asyncio.sleep(5) search_query = ( """search index=netops "SNMPv2-MIB.sysContact.value"="test_trap_v3" """ diff --git a/mkdocs.yml b/mkdocs.yml index eefd07ceb..3e32c4589 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -51,6 +51,7 @@ nav: - Inventory configuration: "dockercompose/3-inventory-configuration.md" - Scheduler configuration: "dockercompose/4-scheduler-configuration.md" - Traps configuration: "dockercompose/5-traps-configuration.md" + - Discovery configuration: "dockercompose/11-discovery-configuration.md" - .env file configuration: "dockercompose/6-env-file-configuration.md" - SNMPv3 secrets configuration: "dockercompose/7-snmpv3-secrets.md" - Offline installation: "dockercompose/8-offline-installation.md" @@ -76,6 +77,7 @@ nav: - SNMP data format: "microk8s/configuration/snmp-data-format.md" - Traps: "microk8s/configuration/trap-configuration.md" - Worker: "microk8s/configuration/worker-configuration.md" + - Discovery: "microk8s/configuration/discovery-configuration.md" - MongoDB: "microk8s/configuration/mongo-configuration.md" - Redis: "microk8s/configuration/redis-configuration.md" - SNMPv3 configuration: "microk8s/configuration/snmpv3-configuration.md" @@ -95,6 +97,7 @@ nav: - High Availability: "ha.md" - Lightweight installation: "small-environment.md" - Splunk dashboards: "dashboard.md" + - Discovery: "discovery.md" - Releases: "releases.md" - Request MIB: "mib-request.md" - Security: "security.md" @@ -106,6 +109,7 @@ nav: - General issues: "troubleshooting/general-issues.md" - Polling issues: "troubleshooting/polling-issues.md" - 
Traps issues: "troubleshooting/traps-issues.md" + - Discovery issues: "troubleshooting/discovery-issues.md" diff --git a/poetry.lock b/poetry.lock index 3f3dad23f..52d2e1842 100644 --- a/poetry.lock +++ b/poetry.lock @@ -15,19 +15,6 @@ files = [ [package.dependencies] vine = ">=5.0.0,<6.0.0" -[[package]] -name = "async-timeout" -version = "5.0.1" -description = "Timeout context manager for asyncio programs" -optional = false -python-versions = ">=3.8" -groups = ["main"] -markers = "python_full_version < \"3.11.3\"" -files = [ - {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, - {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, -] - [[package]] name = "attrs" version = "25.3.0" @@ -108,7 +95,6 @@ files = [ [package.dependencies] attrs = ">=24.3.0" -exceptiongroup = {version = ">=1.1.1", markers = "python_version < \"3.11\""} typing-extensions = ">=4.12.2" [package.extras] @@ -213,6 +199,104 @@ files = [ {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, ] +[[package]] +name = "cffi" +version = "2.0.0" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "platform_python_implementation != \"PyPy\"" +files = [ + {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"}, + {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"}, + {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"}, + {file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"}, + {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"}, + {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"}, + {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"}, + {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"}, + {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = 
"sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"}, + {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"}, + {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"}, + {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"}, + {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"}, + {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"}, + {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"}, + {file = 
"cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"}, + {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"}, + {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"}, + {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"}, + {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"}, + {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"}, + {file = "cffi-2.0.0.tar.gz", hash = 
"sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"}, +] + +[package.dependencies] +pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} + [[package]] name = "charset-normalizer" version = "3.4.2" @@ -472,11 +556,85 @@ files = [ {file = "coverage-7.8.2.tar.gz", hash = "sha256:a886d531373a1f6ff9fad2a2ba4a045b68467b779ae729ee0b3b10ac20033b27"}, ] +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "cryptography" +version = "46.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = "!=3.9.0,!=3.9.1,>=3.8" +groups = ["main"] +files = [ + {file = "cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a"}, + {file = "cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc"}, + {file = "cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d"}, + {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb"}, + {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849"}, + {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8"}, + {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec"}, + {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91"}, + {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e"}, + {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926"}, + {file = "cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71"}, + {file = "cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac"}, + {file = "cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018"}, + {file = "cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb"}, + {file = "cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c"}, + {file = "cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217"}, + {file = "cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5"}, + {file = "cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715"}, + {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54"}, + {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459"}, + {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422"}, + {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7"}, + {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044"}, + {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665"}, + {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3"}, + {file = "cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20"}, + {file = "cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de"}, + {file = "cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914"}, + {file = "cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db"}, + {file = "cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21"}, + {file = "cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936"}, + {file = "cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683"}, + {file = "cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d"}, + {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0"}, + {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc"}, + {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3"}, + {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971"}, + {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac"}, + {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04"}, + {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506"}, + {file = 
"cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963"}, + {file = "cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4"}, + {file = "cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df"}, + {file = "cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f"}, + {file = "cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372"}, + {file = "cryptography-46.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a23582810fedb8c0bc47524558fb6c56aac3fc252cb306072fd2815da2a47c32"}, + {file = "cryptography-46.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e7aec276d68421f9574040c26e2a7c3771060bc0cff408bae1dcb19d3ab1e63c"}, + {file = "cryptography-46.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea"}, + {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b"}, + {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb"}, + {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717"}, + {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9"}, + {file = "cryptography-46.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c"}, + {file = "cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1"}, +] + [package.dependencies] -tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} +cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""} [package.extras] -toml = ["tomli ; python_full_version <= \"3.11.0a6\""] +docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"] +docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] +nox = ["nox[uv] (>=2024.4.15)"] +pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"] +sdist = ["build (>=1.0.0)"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi (>=2024)", "cryptography-vectors (==46.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test-randomorder = ["pytest-randomly"] [[package]] name = "dnspython" @@ -500,24 +658,17 @@ trio = ["trio (>=0.23)"] wmi = ["wmi (>=1.5.1)"] [[package]] -name = "exceptiongroup" -version = "1.3.0" -description = "Backport of PEP 654 (exception groups)" +name = "filelock" +version = "3.19.1" +description = "A platform independent file lock." 
optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -markers = "python_version == \"3.10\"" +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, - {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, + {file = "filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d"}, + {file = "filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} - -[package.extras] -test = ["pytest (>=6)"] - [[package]] name = "flower" version = "2.0.1" @@ -647,7 +798,7 @@ version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" -groups = ["dev"] +groups = ["main", "dev"] files = [ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, @@ -914,7 +1065,7 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.9" -groups = ["dev"] +groups = ["main", "dev"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -1408,54 +1559,28 @@ files = [ wcwidth = "*" [[package]] -name = "pycryptodomex" -version = "3.23.0" -description = "Cryptographic library for Python" +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pycparser" +version = "2.23" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" groups = ["main"] +markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\"" files = [ - {file = "pycryptodomex-3.23.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:add243d204e125f189819db65eed55e6b4713f70a7e9576c043178656529cec7"}, - {file = "pycryptodomex-3.23.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1c6d919fc8429e5cb228ba8c0d4d03d202a560b421c14867a65f6042990adc8e"}, - {file = "pycryptodomex-3.23.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:1c3a65ad441746b250d781910d26b7ed0a396733c6f2dbc3327bd7051ec8a541"}, - {file = "pycryptodomex-3.23.0-cp27-cp27m-win32.whl", hash = "sha256:47f6d318fe864d02d5e59a20a18834819596c4ed1d3c917801b22b92b3ffa648"}, - {file = "pycryptodomex-3.23.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:d9825410197a97685d6a1fa2a86196430b01877d64458a20e95d4fd00d739a08"}, - {file = "pycryptodomex-3.23.0-cp27-cp27mu-manylinux2010_x86_64.whl", 
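
`pyasn1` joins the main dependency group as the ASN.1 layer (the forked `pysnmp-pyasn1` it supersedes is removed further down). A quick round-trip through its BER codec, using only the stock pyasn1 API:

```python
# Encode an OID to BER and decode it back with pyasn1's standard codec.
from pyasn1.codec.ber import decoder, encoder
from pyasn1.type import univ

oid = univ.ObjectIdentifier("1.3.6.1.2.1.1.4.0")
payload = encoder.encode(oid)
decoded, remainder = decoder.decode(payload, asn1Spec=univ.ObjectIdentifier())
assert decoded == oid and not remainder
```
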
hash = "sha256:267a3038f87a8565bd834317dbf053a02055915acf353bf42ededb9edaf72010"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:7b37e08e3871efe2187bc1fd9320cc81d87caf19816c648f24443483005ff886"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:91979028227543010d7b2ba2471cf1d1e398b3f183cb105ac584df0c36dac28d"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b8962204c47464d5c1c4038abeadd4514a133b28748bcd9fa5b6d62e3cec6fa"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a33986a0066860f7fcf7c7bd2bc804fa90e434183645595ae7b33d01f3c91ed8"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7947ab8d589e3178da3d7cdeabe14f841b391e17046954f2fbcd941705762b5"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c25e30a20e1b426e1f0fa00131c516f16e474204eee1139d1603e132acffc314"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:da4fa650cef02db88c2b98acc5434461e027dce0ae8c22dd5a69013eaf510006"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:58b851b9effd0d072d4ca2e4542bf2a4abcf13c82a29fd2c93ce27ee2a2e9462"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-win32.whl", hash = "sha256:a9d446e844f08299236780f2efa9898c818fe7e02f17263866b8550c7d5fb328"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:bc65bdd9fc8de7a35a74cab1c898cab391a4add33a8fe740bda00f5976ca4708"}, - {file = "pycryptodomex-3.23.0-cp313-cp313t-win_arm64.whl", hash = "sha256:c885da45e70139464f082018ac527fdaad26f1657a99ee13eecdce0f0ca24ab4"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:06698f957fe1ab229a99ba2defeeae1c09af185baa909a31a5d1f9d42b1aaed6"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2c2537863eccef2d41061e82a881dcabb04944c5c06c5aa7110b577cc487545"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43c446e2ba8df8889e0e16f02211c25b4934898384c1ec1ec04d7889c0333587"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f489c4765093fb60e2edafdf223397bc716491b2b69fe74367b70d6999257a5c"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdc69d0d3d989a1029df0eed67cc5e8e5d968f3724f4519bd03e0ec68df7543c"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6bbcb1dd0f646484939e142462d9e532482bc74475cecf9c4903d4e1cd21f003"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:8a4fcd42ccb04c31268d1efeecfccfd1249612b4de6374205376b8f280321744"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:55ccbe27f049743a4caf4f4221b166560d3438d0b1e5ab929e07ae1702a4d6fd"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-win32.whl", hash = "sha256:189afbc87f0b9f158386bf051f720e20fa6145975f1e76369303d0f31d1a8d7c"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:52e5ca58c3a0b0bd5e100a9fbc8015059b05cffc6c66ce9d98b4b45e023443b9"}, - {file = "pycryptodomex-3.23.0-cp37-abi3-win_arm64.whl", hash = 
"sha256:02d87b80778c171445d67e23d1caef279bf4b25c3597050ccd2e13970b57fd51"}, - {file = "pycryptodomex-3.23.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:febec69c0291efd056c65691b6d9a339f8b4bc43c6635b8699471248fe897fea"}, - {file = "pycryptodomex-3.23.0-pp27-pypy_73-win32.whl", hash = "sha256:c84b239a1f4ec62e9c789aafe0543f0594f0acd90c8d9e15bcece3efe55eca66"}, - {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ebfff755c360d674306e5891c564a274a47953562b42fb74a5c25b8fc1fb1cb5"}, - {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eca54f4bb349d45afc17e3011ed4264ef1cc9e266699874cdd1349c504e64798"}, - {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2596e643d4365e14d0879dc5aafe6355616c61c2176009270f3048f6d9a61f"}, - {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdfac7cda115bca3a5abb2f9e43bc2fb66c2b65ab074913643803ca7083a79ea"}, - {file = "pycryptodomex-3.23.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:14c37aaece158d0ace436f76a7bb19093db3b4deade9797abfc39ec6cd6cc2fe"}, - {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7de1e40a41a5d7f1ac42b6569b10bcdded34339950945948529067d8426d2785"}, - {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bffc92138d75664b6d543984db7893a628559b9e78658563b0395e2a5fb47ed9"}, - {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df027262368334552db2c0ce39706b3fb32022d1dce34673d0f9422df004b96a"}, - {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e79f1aaff5a3a374e92eb462fa9e598585452135012e2945f96874ca6eeb1ff"}, - {file = "pycryptodomex-3.23.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:27e13c80ac9a0a1d050ef0a7e0a18cc04c8850101ec891815b6c5a0375e8a245"}, - {file = "pycryptodomex-3.23.0.tar.gz", hash = "sha256:71909758f010c82bc99b0abf4ea12012c98962fbf0583c2164f8b84533c2e4da"}, + {file = "pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934"}, + {file = "pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2"}, ] [[package]] @@ -1671,53 +1796,54 @@ all = ["filelock (>=3.0)", "redis (>=3.3,<4.0)", "redis-py-cluster (>=2.1.3,<3.0 docs = ["furo (>=2022.3.4,<2023.0.0)", "myst-parser (>=0.17)", "sphinx (>=4.3.0,<5.0.0)", "sphinx-autodoc-typehints (>=1.17,<2.0)", "sphinx-copybutton (>=0.5)", "sphinxcontrib-apidoc (>=0.3,<0.4)"] [[package]] -name = "pysnmp-pyasn1" -version = "1.1.3" -description = "ASN.1 types and codecs" +name = "pysmi" +version = "1.6.2" +description = "A pure-Python implementation of SNMP/SMI MIB parsing and conversion library." 
optional = false -python-versions = ">=3.8,<4.0" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pysnmp-pyasn1-1.1.3.tar.gz", hash = "sha256:fc559133ec6717e9d96dd4bd69c981310b23364dc2280a9b5f40f684fb6b4b8a"}, - {file = "pysnmp_pyasn1-1.1.3-py3-none-any.whl", hash = "sha256:d9a471b058adb9f2c3ce3aa85f800f2beef1a86c03b08d182a5653c9880fbd5e"}, + {file = "pysmi-1.6.2-py3-none-any.whl", hash = "sha256:45a3a3b25b9e0465e6a49e47ba70d5eab0424f6f1131ca406ca3f385f027247e"}, + {file = "pysmi-1.6.2.tar.gz", hash = "sha256:abed01673113886d10f0f336426859238fc13b5383c7e28e13dbcd5af0443ba1"}, ] +[package.dependencies] +Jinja2 = ">=3.1.3" +ply = ">=3.11" +requests = ">=2.26.0" + +[package.extras] +dev = ["black (==22.3.0)", "bump2version (>=1.0.1)", "doc8 (>=1.1.1)", "flake8 (>=5.0.4)", "flake8-docstrings (>=1.7.0)", "flake8-import-order (>=0.18.2)", "flake8-rst-docstrings (>=0.3.0)", "furo (>=2023.1.1)", "isort (>=5.10.1)", "pep8-naming (>=0.14.1)", "pre-commit (==2.21.0)", "pysnmp (>=7.1.16)", "pytest (>=6.2.5)", "pytest-cov (>=3.0.0)", "sphinx (>=7.0.0,<8.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-notfound-page (>=1.0.0)", "sphinx-sitemap-lextudio (>=2.5.2)"] + [[package]] -name = "pysnmp-pysmi" -version = "1.1.12" -description = "" +name = "pysnmp" +version = "7.1.8" +description = "A Python library for SNMP" optional = false -python-versions = "<4.0,>=3.8" +python-versions = "<4.0,>=3.9" groups = ["main"] files = [ - {file = "pysnmp_pysmi-1.1.12-py3-none-any.whl", hash = "sha256:a868be988f6578323a8b7eb7c8be7a8a539b061de90b8c2b5ddfdc8f9d376ba2"}, - {file = "pysnmp_pysmi-1.1.12.tar.gz", hash = "sha256:7d12ee12fb99c08449318430ce1b1633f6eee5d938e880215f41cc9976ecdcbb"}, + {file = "pysnmp-7.1.8-py3-none-any.whl", hash = "sha256:94145bd960a4473be7102d10f2f290c2f3027316900179131d67d59e6c135307"}, + {file = "pysnmp-7.1.8.tar.gz", hash = "sha256:03d88f0ba21e0357ad58c9566d369dd6123b5b628840f91001d40c0f5b658933"}, ] [package.dependencies] -ply = ">=3.11,<4.0" -requests = ">=2.31.0,<3.0.0" +pyasn1 = ">=0.4.8,<0.5.0 || >0.5.0" [[package]] -name = "pysnmplib" -version = "5.0.24" -description = "" +name = "pysnmpcrypto" +version = "0.0.4" +description = "Strong cryptography support for PySNMP (SNMP library for Python)" optional = false -python-versions = "^3.8" +python-versions = "*" groups = ["main"] -files = [] -develop = false +files = [ + {file = "pysnmpcrypto-0.0.4-py2.py3-none-any.whl", hash = "sha256:5889733caa030f45d9e03ea9d6370fb06426a8cb7f839aabbcdde33c6f634679"}, + {file = "pysnmpcrypto-0.0.4.tar.gz", hash = "sha256:b635fb3b1ec6637b9a0033f50506214e16eb84574b1d25ab027bbde4caa55129"}, +] [package.dependencies] -pycryptodomex = "^3.11.0" -pysnmp-pyasn1 = "^1.1.3" -pysnmp-pysmi = "^1.0.4" - -[package.source] -type = "git" -url = "https://github.com/pysnmp/pysnmp.git" -reference = "main" -resolved_reference = "4891556e7db831a5a9b27d4bad8ff102609b2a2c" +cryptography = {version = "*", markers = "python_version >= \"3.4\""} [[package]] name = "pytest" @@ -1733,16 +1859,34 @@ files = [ [package.dependencies] colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1", markers = "python_version < \"3.11\""} iniconfig = ">=1" packaging = ">=20" pluggy = ">=1.5,<2" pygments = ">=2.7.2" -tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] +[[package]] +name = "pytest-asyncio" +version = "1.2.0" +description = "Pytest 
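
The lock now pins the upstream `pysnmp` 7.x release in place of the `pysnmplib` git fork removed above. pysnmp 7 is asyncio-native, which is what drove the test rewrites earlier in this diff; a sketch of a one-shot v2c GET against its high-level API (target host and community string are placeholders):

```python
import asyncio

from pysnmp.hlapi.v3arch.asyncio import (
    CommunityData,
    ContextData,
    ObjectIdentity,
    ObjectType,
    SnmpEngine,
    UdpTransportTarget,
    get_cmd,
)


async def main() -> None:
    error_indication, error_status, error_index, var_binds = await get_cmd(
        SnmpEngine(),
        CommunityData("public", mpModel=1),  # SNMP v2c
        await UdpTransportTarget.create(("127.0.0.1", 161)),  # async factory in pysnmp 7
        ContextData(),
        ObjectType(ObjectIdentity("SNMPv2-MIB", "sysDescr", 0)),
    )
    if error_indication:
        print(error_indication)
    else:
        for name, value in var_binds:
            print(f"{name} = {value}")


asyncio.run(main())
```
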
support for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest_asyncio-1.2.0-py3-none-any.whl", hash = "sha256:8e17ae5e46d8e7efe51ab6494dd2010f4ca8dae51652aa3c8d55acf50bfb2e99"}, + {file = "pytest_asyncio-1.2.0.tar.gz", hash = "sha256:c609a64a2a8768462d0c99811ddb8bd2583c33fd33cf7f21af1c142e824ffb57"}, +] + +[package.dependencies] +pytest = ">=8.2,<9" +typing-extensions = {version = ">=4.12", markers = "python_version < \"3.13\""} + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + [[package]] name = "pytest-cov" version = "6.1.1" @@ -1894,9 +2038,6 @@ files = [ {file = "redis-6.2.0.tar.gz", hash = "sha256:e821f129b75dde6cb99dd35e5c76e8c49512a5a0d8dfdc560b2fbd44b85ca977"}, ] -[package.dependencies] -async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""} - [package.extras] hiredis = ["hiredis (>=3.2.0)"] jwt = ["pyjwt (>=2.9.0)"] @@ -2250,49 +2391,6 @@ all = ["tornado (>=4.0)", "twisted"] tornado = ["tornado (>=4.0)"] twisted = ["twisted"] -[[package]] -name = "tomli" -version = "2.2.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -markers = "python_full_version <= \"3.11.0a6\"" -files = [ - {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, - {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, - {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, - {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, - {file = 
"tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, - {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, - {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, - {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, - {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, - {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, - {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, -] - [[package]] name = "tornado" version = "6.5.1" @@ -2326,7 +2424,7 @@ files = [ {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, ] -markers = {dev = "python_version == \"3.10\""} +markers = {dev = "python_version == \"3.12\""} [[package]] name = "tzdata" @@ -2599,5 +2697,5 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" -python-versions = ">=3.10,<3.12" -content-hash = "1f0b0fd25a8222113dfded6c013d08b5186e14b65080429666d1f90f73a59490" +python-versions = ">=3.12,<3.14" +content-hash = "ca3b064ec7ec4c46dd66f597ad6d287cb6548b154e71b1604a763501542bcd24" diff --git a/pyproject.toml b/pyproject.toml index 3839f3d40..f24562c9b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ 
-10,6 +10,7 @@ include = ["splunk_connect_for_snmp/profiles/*.yaml"] [tool.poetry.scripts] traps = 'splunk_connect_for_snmp.traps:main' inventory-loader = 'splunk_connect_for_snmp.inventory.loader:load' +discovery-loader = 'splunk_connect_for_snmp.discovery.loader:load' run-walk = 'splunk_connect_for_snmp.walk:run_walk' [tool.pytest.ini_options] @@ -21,7 +22,7 @@ testpaths = ["test"] python_files = ["test_*.py"] [tool.poetry.dependencies] -python = ">=3.10,<3.12" +python = ">=3.12,<3.14" pymongo = {extras = ["srv"], version = "^4.0.0"} requests = {extras = ["crypto"], version = "^2.31.0"} celery = {extras = ["tblib"], version = "5.5.3"} @@ -43,10 +44,13 @@ mongolock = "^1.3.4" pika = "^1.2.0" JSON-log-formatter ="^1.0.0" "ruamel.yaml" = "^0.18.0" -pysnmplib = {git = "https://github.com/pysnmp/pysnmp.git", branch = "main"} urllib3 = "^2.0.0" jsonschema = "4.24.0" flower = "^2.0.1" +filelock = "^3.18.0" +pysnmpcrypto = "0.0.4" +pysnmp = "7.1.8" +pysmi = "^1.6.2" [tool.poetry.group.dev.dependencies] pytest = "^8.0.0" @@ -56,6 +60,7 @@ mkdocs = "^1.2.2" mkdocs-material = "^9.0.0" python-dotenv = "^1.0.0" mkdocs-video = "^1.5.0" +pytest-asyncio = "^1.2.0" [build-system] requires = ["poetry>=0.12"] diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..f19ee31a3 --- /dev/null +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,28 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + autodiscovery: + discovery_version2c: + community: public + delete_already_discovered: false + device_rules: + - group: linux_group + name: Linux servers + patterns: '*linux*' + - group: centos_group + name: Centos servers + patterns: '*centos*' + frequency: 21600 + network_address: 54.82.41.24/28 + port: 161 + version: 2c + discoveryPath: /home/devuser/discovery + enabled: true + ipv6Enabled: true + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/discovery/job.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/discovery/job.yaml new file mode 100644 index 000000000..245f36f10 --- /dev/null +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/discovery/job.yaml @@ -0,0 +1,64 @@ +--- +# Source: splunk-connect-for-snmp/templates/discovery/job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: release-name-splunk-connect-for-snmp-discovery + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-discovery + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + ttlSecondsAfterFinished: 300 + template: + metadata: + # + + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-discovery + app.kubernetes.io/instance: release-name + spec: + containers: + - name: splunk-connect-for-snmp-discovery + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + imagePullPolicy: Always + args: + ["discovery"] + env: + - name: REDIS_HOST + value: release-name-redis + - name: REDIS_PORT + value: "6379" + - name: REDIS_DB + value: "1" + - name: CELERY_DB + value: "0" + - name: DISCOVERY_CONFIG_PATH + value: 
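
`pyproject.toml` registers a new `discovery-loader` entry point, and the rendered ConfigMap above shows the `discovery-config.yaml` it is fed through `DISCOVERY_CONFIG_PATH`. The actual `splunk_connect_for_snmp.discovery.loader` module is not part of this diff, so the following is only an illustrative sketch of reading that file; `ruamel.yaml` is already a declared dependency:

```python
# Illustrative only: reads the mounted discovery-config.yaml the way the Job's
# DISCOVERY_CONFIG_PATH variable suggests; key names mirror the rendered ConfigMap.
import os

from ruamel.yaml import YAML


def load() -> dict:
    path = os.getenv("DISCOVERY_CONFIG_PATH", "/app/discovery/discovery-config.yaml")
    with open(path) as handle:
        config = YAML(typ="safe").load(handle) or {}
    autodiscovery = config.get("autodiscovery", {})
    if config.get("enabled") and autodiscovery.get("network_address"):
        print(
            f"discovering {autodiscovery['network_address']} "
            f"every {autodiscovery.get('frequency')} seconds"
        )
    return config
```
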
/app/discovery/discovery-config.yaml + - name: LOG_LEVEL + value: INFO + - name: CHAIN_OF_TASKS_EXPIRY_TIME + value: "60" + - name: CELERY_TASK_TIMEOUT + value: "2400" + volumeMounts: + - name: discovery-config + mountPath: "/app/discovery" + readOnly: true + - name: tmp + mountPath: "/tmp/" + readOnly: false + + volumes: + # # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: discovery-config + configMap: + name: splunk-connect-for-snmp-discovery-config + items: + - key: "discovery-config.yaml" + path: "discovery-config.yaml" + - name: tmp + emptyDir: {} + restartPolicy: OnFailure diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index 38e0d7d50..671dadab8 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "beat", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: REDIS_MODE diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml index 8beee8755..4079ae818 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -44,6 +44,8 @@ spec: "trap" ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: MONGO_URI diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml new file mode 100644 index 000000000..975fb6a06 --- /dev/null +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml @@ -0,0 +1,158 @@ +--- +# Source: splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: release-name-splunk-connect-for-snmp-worker-discovery + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-discovery + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-discovery + app.kubernetes.io/instance: release-name + template: + metadata: + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-discovery + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: release-name-splunk-connect-for-snmp-user + securityContext: + fsGroup: 10001 + containers: + - name: 
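
The discovery Job above and the worker-discovery Deployment that follows both mount a shared discovery folder, which is presumably what the new `filelock` dependency guards. A sketch of serializing writers over such a folder; the file names are assumptions, not taken from this diff:

```python
# filelock serializes access to a file shared between the discovery Job and the
# discovery worker; the folder default and file names here are illustrative.
import os

from filelock import FileLock

folder = os.getenv("DISCOVERY_FOLDER_PATH", "/app/discovery")
results = os.path.join(folder, "discovered_hosts.csv")

with FileLock(results + ".lock", timeout=60):
    with open(results, "a") as handle:
        handle.write("10.0.0.17,161,2c,public\n")
```
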
splunk-connect-for-snmp-discovery + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + imagePullPolicy: Always + args: + [ + "celery", "worker-discovery", + ] + env: + - name: USER + value: "sc4snmp" + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: SC4SNMP_VERSION + value: CURRENT-VERSION + - name: REDIS_MODE + value: "standalone" + - name: REDIS_HOST + value: release-name-redis + - name: REDIS_PORT + value: "6379" + - name: REDIS_DB + value: "1" + - name: CELERY_DB + value: "0" + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: WALK_RETRY_MAX_INTERVAL + value: "180" + - name: WALK_MAX_RETRIES + value: "5" + - name: METRICS_INDEXING_ENABLED + value: "false" + - name: POLL_BASE_PROFILES + value: "true" + - name: LOG_LEVEL + value: INFO + - name: DISABLE_MONGO_DEBUG_LOGGING + value: "true" + - name: UDP_CONNECTION_TIMEOUT + value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" + - name: MAX_OID_TO_PROCESS + value: "70" + - name: MAX_REPETITIONS + value: "10" + - name: PYSNMP_DEBUG + value: "" + - name: PROFILES_RELOAD_DELAY + value: "60" + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: SPLUNK_HEC_SCHEME + value: "https" + - name: SPLUNK_HEC_HOST + value: "10.202.18.152" + - name: IGNORE_EMPTY_VARBINDS + value: "false" + - name: SPLUNK_HEC_PORT + value: "8088" + - name: SPLUNK_HEC_INSECURESSL + value: "true" + - name: SPLUNK_AGGREGATE_TRAPS_EVENTS + value: "false" + - name: SPLUNK_METRIC_NAME_HYPHEN_TO_UNDERSCORE + value: "false" + - name: SPLUNK_HEC_TOKEN + valueFrom: + secretKeyRef: + name: splunk-connect-for-snmp-splunk + key: hec_token + - name: SPLUNK_HEC_INDEX_EVENTS + value: netops + - name: SPLUNK_HEC_INDEX_METRICS + value: netmetrics + - name: SPLUNK_SOURCETYPE_TRAPS + value: "sc4snmp:traps" + - name: SPLUNK_SOURCETYPE_POLLING_EVENTS + value: "sc4snmp:event" + - name: SPLUNK_SOURCETYPE_POLLING_METRICS + value: "sc4snmp:metric" + - name: DISCOVERY_FOLDER_PATH + value: /app/discovery + - name: CELERY_TASK_TIMEOUT + value: "2400" + - name: IPv6_ENABLED + value: "true" + + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + - name: discovery-volume + mountPath: /app/discovery + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. 
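
The worker-discovery Deployment carries the same `SPLUNK_HEC_*` variables as the other workers; together they resolve to Splunk's standard HTTP Event Collector endpoint. A sketch of the call those variables imply, with a made-up event body:

```python
# Assembles the HEC URL from the SPLUNK_HEC_* variables above and posts one
# event to Splunk's documented /services/collector endpoint. Payload is made up.
import os

import requests

url = "{scheme}://{host}:{port}/services/collector".format(
    scheme=os.environ["SPLUNK_HEC_SCHEME"],
    host=os.environ["SPLUNK_HEC_HOST"],
    port=os.environ["SPLUNK_HEC_PORT"],
)
response = requests.post(
    url,
    headers={"Authorization": f"Splunk {os.environ['SPLUNK_HEC_TOKEN']}"},
    json={
        "index": os.environ["SPLUNK_HEC_INDEX_EVENTS"],
        "sourcetype": "sc4snmp:event",
        "event": {"message": "discovery run finished"},
    },
    # SPLUNK_HEC_INSECURESSL=true disables certificate verification
    verify=os.environ.get("SPLUNK_HEC_INSECURESSL", "false").lower() != "true",
)
response.raise_for_status()
```
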
+ name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} + - name: discovery-volume + hostPath: + path: /home/devuser/discovery + type: Directory diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index f3ce888d1..a261d9964 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-poller", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 18f7815e9..012920ef4 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index 4d432158a..1a437570b 100644 --- a/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..8f5acde96 --- /dev/null +++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,12 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + enabled: false + ipv6Enabled: false + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/scheduler/deployment.yaml 
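
Alongside the existing `UDP_CONNECTION_TIMEOUT`, every worker deployment now also exposes `UDP_CONNECTION_RETRIES`. A hedged sketch of the retry loop such a pair of knobs typically drives; the backoff policy here is an assumption, not the project's actual code:

```python
# Illustrative retry wrapper: UDP_CONNECTION_TIMEOUT bounds each attempt and
# UDP_CONNECTION_RETRIES bounds how many attempts are made before giving up.
import asyncio
import os

TIMEOUT = int(os.getenv("UDP_CONNECTION_TIMEOUT", "3"))
RETRIES = int(os.getenv("UDP_CONNECTION_RETRIES", "5"))


async def with_retries(fetch):
    last_error: Exception | None = None
    for attempt in range(1, RETRIES + 1):
        try:
            return await asyncio.wait_for(fetch(), timeout=TIMEOUT)
        except (TimeoutError, OSError) as error:
            last_error = error
            await asyncio.sleep(min(2**attempt, 30))  # capped exponential backoff
    raise last_error
```
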
b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index 38e0d7d50..671dadab8 100644 --- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "beat", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: REDIS_MODE diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml index a4cca077b..b94c27d42 100644 --- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -43,6 +43,8 @@ spec: "trap" ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: MONGO_URI diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index 23f6532fe..d18f334e6 100644 --- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -43,6 +43,8 @@ spec: "celery", "worker-poller", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -73,6 +75,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index cf7160b80..3b2ccf988 100644 --- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -43,6 +43,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -73,6 +75,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git 
a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index 88a082662..5202bb8a2 100644 --- a/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -43,6 +43,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -73,6 +75,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..8f5acde96 --- /dev/null +++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,12 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + enabled: false + ipv6Enabled: false + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index 38e0d7d50..671dadab8 100644 --- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "beat", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: REDIS_MODE diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/deployment.yaml index a4cca077b..b94c27d42 100644 --- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -43,6 +43,8 @@ spec: "trap" ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: 
/app/config/config.yaml - name: MONGO_URI diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index 23f6532fe..d18f334e6 100644 --- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -43,6 +43,8 @@ spec: "celery", "worker-poller", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -73,6 +75,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index cf7160b80..3b2ccf988 100644 --- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -43,6 +43,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -73,6 +75,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index 88a082662..5202bb8a2 100644 --- a/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_autoscaling_enabled_deprecated/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -43,6 +43,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -73,6 +75,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..8f5acde96 --- /dev/null +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,12 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + enabled: false + ipv6Enabled: false + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/scheduler/deployment.yaml 
b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index 38e0d7d50..671dadab8 100644 --- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "beat", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: REDIS_MODE diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml index 8beee8755..4079ae818 100644 --- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -44,6 +44,8 @@ spec: "trap" ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: MONGO_URI diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index f3ce888d1..a261d9964 100644 --- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-poller", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 18f7815e9..012920ef4 100644 --- a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index 4d432158a..1a437570b 100644 --- 
a/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_enable_ui/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..8f5acde96 --- /dev/null +++ b/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,12 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + enabled: false + ipv6Enabled: false + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git a/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/traps/deployment.yaml index 8beee8755..4079ae818 100644 --- a/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -44,6 +44,8 @@ spec: "trap" ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: MONGO_URI diff --git a/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 18f7815e9..012920ef4 100644 --- a/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index 4d432158a..1a437570b 100644 --- 
a/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_metallb_false/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..8f5acde96 --- /dev/null +++ b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,12 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + enabled: false + ipv6Enabled: false + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index 38e0d7d50..671dadab8 100644 --- a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "beat", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: REDIS_MODE diff --git a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/traps/deployment.yaml index 8beee8755..4079ae818 100644 --- a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -44,6 +44,8 @@ spec: "trap" ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: MONGO_URI diff --git a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index f3ce888d1..a261d9964 100644 --- a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ 
b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-poller", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 18f7815e9..012920ef4 100644 --- a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index 4d432158a..1a437570b 100644 --- a/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_mongodb_custom_image/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..fb9e937c0 --- /dev/null +++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,41 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + autodiscovery: + discovery_version2c: + community: public + delete_already_discovered: false + device_rules: + - group: linux_group + name: Linux servers + patterns: '*linux*' + - group: centos_group + name: Centos servers + patterns: '*centos*' + frequency: 21600 + network_address: 4.91.99.113/28 + port: 161 + version: 2c + discovery_version3: + delete_already_discovered: false + device_rules: + - group: linux_group + name: Linux VM + patterns: '*Linux*' + frequency: 21600 + network_address: 4.91.99.113/28 + port: 161 + secret: secret + security_engine: 80001f8880e761866965756b6800000000 + version: "3" + discoveryPath: /home/devuser/discovery-test2 + enabled: true + ipv6Enabled: true + logLevel: INFO + usernameSecrets: + - secret diff --git 
a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/common/scheduler-config.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/common/scheduler-config.yaml
new file mode 100644
index 000000000..2ace18f92
--- /dev/null
+++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/common/scheduler-config.yaml
@@ -0,0 +1,21 @@
+---
+# Source: splunk-connect-for-snmp/templates/common/scheduler-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: splunk-connect-for-snmp-config
+  labels:
+    app.kubernetes.io/name: splunk-connect-for-snmp-scheduler
+    app.kubernetes.io/instance: release-name
+    helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION
+    app.kubernetes.io/version: "CURRENT-VERSION"
+    app.kubernetes.io/managed-by: Helm
+data:
+  config.yaml: |-
+    communities:
+      public:
+        communityIndex:
+        contextEngineId:
+        contextName:
+        tag:
+        securityName:
diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/common/splunk-secret.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/common/splunk-secret.yaml
new file mode 100644
index 000000000..21e689f0a
--- /dev/null
+++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/common/splunk-secret.yaml
@@ -0,0 +1,9 @@
+---
+# Source: splunk-connect-for-snmp/templates/common/splunk-secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: splunk-connect-for-snmp-splunk
+type: Opaque
+data:
+  hec_token: "MDAwMDAwMDAtMDAwMC0wMDAwLTAwMDAtMDAwMDAwMDAwMDAw"
diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/discovery/job.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/discovery/job.yaml
new file mode 100644
index 000000000..245f36f10
--- /dev/null
+++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/discovery/job.yaml
@@ -0,0 +1,64 @@
+---
+# Source: splunk-connect-for-snmp/templates/discovery/job.yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: release-name-splunk-connect-for-snmp-discovery
+  labels:
+    app.kubernetes.io/name: splunk-connect-for-snmp-discovery
+    app.kubernetes.io/instance: release-name
+    helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION
+    app.kubernetes.io/version: "CURRENT-VERSION"
+    app.kubernetes.io/managed-by: Helm
+spec:
+  ttlSecondsAfterFinished: 300
+  template:
+    metadata:
+      #
+
+      labels:
+        app.kubernetes.io/name: splunk-connect-for-snmp-discovery
+        app.kubernetes.io/instance: release-name
+    spec:
+      containers:
+        - name: splunk-connect-for-snmp-discovery
+          image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION"
+          imagePullPolicy: Always
+          args:
+            ["discovery"]
+          env:
+            - name: REDIS_HOST
+              value: release-name-redis
+            - name: REDIS_PORT
+              value: "6379"
+            - name: REDIS_DB
+              value: "1"
+            - name: CELERY_DB
+              value: "0"
+            - name: DISCOVERY_CONFIG_PATH
+              value: /app/discovery/discovery-config.yaml
+            - name: LOG_LEVEL
+              value: INFO
+            - name: CHAIN_OF_TASKS_EXPIRY_TIME
+              value: "60"
+            - name: CELERY_TASK_TIMEOUT
+              value: "2400"
+          volumeMounts:
+            - name: discovery-config
+              mountPath: "/app/discovery"
+              readOnly: true
+            - name: tmp
+              mountPath: "/tmp/"
+              readOnly: false
+
+      volumes:
+        # # You set volumes at the Pod level, then mount them into containers inside that Pod
+        - name: discovery-config
+          configMap:
+            name: splunk-connect-for-snmp-discovery-config
+            items:
+              - key: "discovery-config.yaml"
+                path: "discovery-config.yaml"
+        - name: tmp
+          emptyDir:
{} + restartPolicy: OnFailure diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/redis/redis-config.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/redis/redis-config.yaml new file mode 100644 index 000000000..22da33840 --- /dev/null +++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/redis/redis-config.yaml @@ -0,0 +1,33 @@ +--- +# Source: splunk-connect-for-snmp/templates/redis/redis-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: release-name-redis-config + namespace: default + labels: + app: release-name-redis +data: + redis.conf: | + # Data directory + dir /data + + # Persistence - RDB + save 900 1 + save 300 10 + save 60 10000 + + # Persistence - AOF + appendonly yes + appendfsync everysec + + # Logging + loglevel notice + + # Memory + maxmemory-policy noeviction + + # Network + bind 0.0.0.0 + protected-mode no + port 6379 diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/redis/redis-standalone-service.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/redis/redis-standalone-service.yaml new file mode 100644 index 000000000..5b3445437 --- /dev/null +++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/redis/redis-standalone-service.yaml @@ -0,0 +1,15 @@ +--- +# Source: splunk-connect-for-snmp/templates/redis/redis-standalone-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: release-name-redis + namespace: default +spec: + type: ClusterIP + ports: + - port: 6379 + targetPort: 6379 + name: redis + selector: + app: release-name-redis diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/redis/redis-standalone-statefulset.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/redis/redis-standalone-statefulset.yaml new file mode 100644 index 000000000..49876b6fc --- /dev/null +++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/redis/redis-standalone-statefulset.yaml @@ -0,0 +1,107 @@ +--- +# Source: splunk-connect-for-snmp/templates/redis/redis-standalone-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: release-name-redis-standalone + namespace: default + labels: + app: release-name-redis +spec: + serviceName: release-name-redis + replicas: 1 + selector: + matchLabels: + app: release-name-redis + template: + metadata: + labels: + app: release-name-redis + annotations: + checksum/redis-config: e82c09fa615350d9c147a0884485f953308babd6b8842d0cbe695ed5595eb530 + spec: + securityContext: + runAsUser: 999 + fsGroup: 999 + initContainers: + - name: fix-permissions + image: redis:8.2.2 + imagePullPolicy: IfNotPresent + command: + - sh + - -c + - | + echo "=== Redis Init: Fixing Permissions ===" + echo "Current ownership:" + ls -ln /data + echo "" + echo "Fixing ownership to 999:999..." 
+ chown -R 999:999 /data + chmod -R 755 /data + echo "" + echo "New ownership:" + ls -ln /data + echo "=== Permissions Fixed ===" + volumeMounts: + - name: redis-data + mountPath: /data + securityContext: + runAsUser: 0 # Must run as root to chown + containers: + - name: redis + image: redis:8.2.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 6379 + name: redis + command: + - sh + - -c + args: + - | + # Copy config to writable location + cp /etc/redis/redis.conf /tmp/redis.conf + + # Start Redis + exec redis-server /tmp/redis.conf + volumeMounts: + - name: redis-data + mountPath: /data + - name: redis-config + mountPath: /etc/redis + resources: + {} + livenessProbe: + exec: + command: + - sh + - -c + - | + redis-cli ping + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + exec: + command: + - sh + - -c + - | + redis-cli ping + initialDelaySeconds: 5 + periodSeconds: 5 + # Storage enabled but no existing PVC - use volumeClaimTemplates below + volumes: + - name: redis-config + configMap: + name: release-name-redis-config + # No existing PVC found, create new one via volumeClaimTemplates + volumeClaimTemplates: + - metadata: + name: redis-data + spec: + accessModes: + - ReadWriteOnce + storageClassName: microk8s-hostpath + resources: + requests: + storage: 5Gi diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/serviceaccount.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/serviceaccount.yaml new file mode 100644 index 000000000..59ae809f1 --- /dev/null +++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +--- +# Source: splunk-connect-for-snmp/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: release-name-splunk-connect-for-snmp-user + labels: + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/sim/pdb.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/sim/pdb.yaml new file mode 100644 index 000000000..0f1827e83 --- /dev/null +++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/sim/pdb.yaml @@ -0,0 +1,18 @@ +--- +# Source: splunk-connect-for-snmp/templates/sim/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: release-name-splunk-connect-for-snmp-sim + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-sim + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + minAvailable: 80% + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-sim + app.kubernetes.io/instance: release-name diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/tests/test-connection.yaml new file mode 100644 index 000000000..dce4a4bf8 --- /dev/null +++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -0,0 +1,39 @@ +--- +# Source: splunk-connect-for-snmp/templates/tests/test-connection.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "release-name-splunk-connect-for-snmp-trap-test-connection" + labels: + helm.sh/chart: 
splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test + "kube-score/ignore": "pod-probes,pod-networkpolicy" +spec: + containers: + - name: wget + image: busybox:1.34.1 + imagePullPolicy: Always + command: ['wget'] + args: ['release-name-splunk-connect-for-snmp-trap:162'] + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + resources: + limits: + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + + restartPolicy: Never diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml new file mode 100644 index 000000000..6ead8122c --- /dev/null +++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml @@ -0,0 +1,162 @@ +--- +# Source: splunk-connect-for-snmp/templates/worker/discovery/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: release-name-splunk-connect-for-snmp-worker-discovery + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-discovery + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-discovery + app.kubernetes.io/instance: release-name + template: + metadata: + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-discovery + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: release-name-splunk-connect-for-snmp-user + securityContext: + fsGroup: 10001 + containers: + - name: splunk-connect-for-snmp-discovery + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + imagePullPolicy: Always + args: + [ + "celery", "worker-discovery", + ] + env: + - name: USER + value: "sc4snmp" + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: SC4SNMP_VERSION + value: CURRENT-VERSION + - name: REDIS_MODE + value: "standalone" + - name: REDIS_HOST + value: release-name-redis + - name: REDIS_PORT + value: "6379" + - name: REDIS_DB + value: "1" + - name: CELERY_DB + value: "0" + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: WALK_RETRY_MAX_INTERVAL + value: "180" + - name: WALK_MAX_RETRIES + value: "5" + - name: METRICS_INDEXING_ENABLED + value: "false" + - name: POLL_BASE_PROFILES + value: "true" + - name: LOG_LEVEL + value: INFO + - name: DISABLE_MONGO_DEBUG_LOGGING + value: "true" + - name: UDP_CONNECTION_TIMEOUT + value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" + - name: MAX_OID_TO_PROCESS + value: "70" + - name: MAX_REPETITIONS + value: "10" + - name: PYSNMP_DEBUG + value: "" + - name: PROFILES_RELOAD_DELAY + value: "60" + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: SPLUNK_HEC_HOST + value: "" + - name: IGNORE_EMPTY_VARBINDS + value: "false" + - 
name: SPLUNK_HEC_PORT + value: "8088" + - name: SPLUNK_HEC_INSECURESSL + value: "false" + - name: SPLUNK_AGGREGATE_TRAPS_EVENTS + value: "false" + - name: SPLUNK_METRIC_NAME_HYPHEN_TO_UNDERSCORE + value: "false" + - name: SPLUNK_HEC_TOKEN + valueFrom: + secretKeyRef: + name: splunk-connect-for-snmp-splunk + key: hec_token + - name: SPLUNK_HEC_INDEX_EVENTS + value: netops + - name: SPLUNK_HEC_INDEX_METRICS + value: netmetrics + - name: SPLUNK_SOURCETYPE_TRAPS + value: "sc4snmp:traps" + - name: SPLUNK_SOURCETYPE_POLLING_EVENTS + value: "sc4snmp:event" + - name: SPLUNK_SOURCETYPE_POLLING_METRICS + value: "sc4snmp:metric" + - name: DISCOVERY_FOLDER_PATH + value: /app/discovery + - name: CELERY_TASK_TIMEOUT + value: "2400" + - name: IPv6_ENABLED + value: "true" + + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + - name: discovery-volume + mountPath: /app/discovery + - name: secret-snmpv3-secrets + mountPath: /app/secrets/snmpv3/secret + readOnly: true + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} + - name: discovery-volume + hostPath: + path: /home/devuser/discovery-test2 + type: Directory + - name: secret-snmpv3-secrets + secret: + secretName: secret diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/worker/pdb.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/worker/pdb.yaml new file mode 100644 index 000000000..4b3ea594c --- /dev/null +++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/worker/pdb.yaml @@ -0,0 +1,18 @@ +--- +# Source: splunk-connect-for-snmp/templates/worker/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: release-name-splunk-connect-for-snmp-worker + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + minAvailable: 80% + selector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker + app.kubernetes.io/instance: release-name diff --git a/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml new file mode 100644 index 000000000..7997a31aa --- /dev/null +++ b/rendered/manifests/tests_only_discovery/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -0,0 +1,164 @@ +--- +# Source: splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: release-name-splunk-connect-for-snmp-worker-sender + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-sender + app.kubernetes.io/instance: release-name + helm.sh/chart: splunk-connect-for-snmp-CURRENT-VERSION + app.kubernetes.io/version: "CURRENT-VERSION" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + 
app.kubernetes.io/name: splunk-connect-for-snmp-worker-sender + app.kubernetes.io/instance: release-name + template: + metadata: + annotations: + checksum/redis-config: e82c09fa615350d9c147a0884485f953308babd6b8842d0cbe695ed5595eb530 + labels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-sender + app.kubernetes.io/instance: release-name + spec: + serviceAccountName: release-name-splunk-connect-for-snmp-user + securityContext: + fsGroup: 10001 + containers: + - name: splunk-connect-for-snmp-worker-sender + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + runAsGroup: 10001 + image: "ghcr.io/splunk/splunk-connect-for-snmp/container:CURRENT-VERSION" + imagePullPolicy: Always + args: + [ + "celery", "worker-sender", + ] + env: + - name: USER + value: "sc4snmp" + - name: CONFIG_PATH + value: /app/config/config.yaml + - name: SC4SNMP_VERSION + value: CURRENT-VERSION + - name: REDIS_MODE + value: "standalone" + - name: REDIS_HOST + value: release-name-redis + - name: REDIS_PORT + value: "6379" + - name: REDIS_DB + value: "1" + - name: CELERY_DB + value: "0" + - name: MONGO_URI + value: mongodb://release-name-mongodb:27017 + - name: WALK_RETRY_MAX_INTERVAL + value: "180" + - name: WALK_MAX_RETRIES + value: "5" + - name: METRICS_INDEXING_ENABLED + value: "false" + - name: POLL_BASE_PROFILES + value: "true" + - name: LOG_LEVEL + value: INFO + - name: DISABLE_MONGO_DEBUG_LOGGING + value: "true" + - name: UDP_CONNECTION_TIMEOUT + value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" + - name: MAX_OID_TO_PROCESS + value: "70" + - name: MAX_REPETITIONS + value: "10" + - name: PYSNMP_DEBUG + value: "" + - name: PROFILES_RELOAD_DELAY + value: "60" + - name: MIB_SOURCES + value: "http://release-name-mibserver/asn1/@mib@" + - name: MIB_INDEX + value: "http://release-name-mibserver/index.csv" + - name: MIB_STANDARD + value: "http://release-name-mibserver/standard.txt" + - name: SPLUNK_HEC_HOST + value: "" + - name: IGNORE_EMPTY_VARBINDS + value: "false" + - name: SPLUNK_HEC_PORT + value: "8088" + - name: SPLUNK_HEC_INSECURESSL + value: "false" + - name: SPLUNK_AGGREGATE_TRAPS_EVENTS + value: "false" + - name: SPLUNK_METRIC_NAME_HYPHEN_TO_UNDERSCORE + value: "false" + - name: SPLUNK_HEC_TOKEN + valueFrom: + secretKeyRef: + name: splunk-connect-for-snmp-splunk + key: hec_token + - name: SPLUNK_HEC_INDEX_EVENTS + value: netops + - name: SPLUNK_HEC_INDEX_METRICS + value: netmetrics + - name: SPLUNK_SOURCETYPE_TRAPS + value: "sc4snmp:traps" + - name: SPLUNK_SOURCETYPE_POLLING_EVENTS + value: "sc4snmp:event" + - name: SPLUNK_SOURCETYPE_POLLING_METRICS + value: "sc4snmp:metric" + - name: WORKER_CONCURRENCY + value: "4" + - name: PREFETCH_COUNT + value: "30" + volumeMounts: + - name: config + mountPath: "/app/config" + readOnly: true + - name: pysnmp-cache-volume + mountPath: "/.pysnmp/" + readOnly: false + - name: tmp + mountPath: "/tmp/" + readOnly: false + resources: + limits: + cpu: 500m + requests: + cpu: 250m + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: splunk-connect-for-snmp-worker-sender + app.kubernetes.io/instance: release-name + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. 
+ name: splunk-connect-for-snmp-config + # An array of keys from the ConfigMap to create as files + items: + - key: "config.yaml" + path: "config.yaml" + - name: pysnmp-cache-volume + emptyDir: {} + - name: tmp + emptyDir: {} diff --git a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..8f5acde96 --- /dev/null +++ b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,12 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + enabled: false + ipv6Enabled: false + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index 38e0d7d50..671dadab8 100644 --- a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "beat", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: REDIS_MODE diff --git a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index f3ce888d1..a261d9964 100644 --- a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-poller", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 18f7815e9..012920ef4 100644 --- a/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests_only_polling/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: 
/app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..8f5acde96 --- /dev/null +++ b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,12 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + enabled: false + ipv6Enabled: false + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/deployment.yaml index 8beee8755..4079ae818 100644 --- a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -44,6 +44,8 @@ spec: "trap" ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: MONGO_URI diff --git a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 18f7815e9..012920ef4 100644 --- a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index 4d432158a..1a437570b 100644 --- a/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_only_traps/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: 
value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..8f5acde96 --- /dev/null +++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,12 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + enabled: false + ipv6Enabled: false + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index 38e0d7d50..671dadab8 100644 --- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "beat", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: REDIS_MODE diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml index 8beee8755..4079ae818 100644 --- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -44,6 +44,8 @@ spec: "trap" ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: MONGO_URI diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index bccf3e046..12af748f9 100644 --- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-poller", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git 
a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index e129ecdcd..a2af31868 100644 --- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index d205cab1b..86773327e 100644 --- a/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_probes_enabled/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..8f5acde96 --- /dev/null +++ b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,12 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config +data: + discovery-config.yaml: | + enabled: false + ipv6Enabled: false + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/scheduler/deployment.yaml b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/scheduler/deployment.yaml index 6c1c0cb88..009780cc5 100644 --- a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/scheduler/deployment.yaml +++ b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/scheduler/deployment.yaml @@ -45,6 +45,8 @@ spec: "celery", "beat", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: REDIS_MODE diff --git a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git 
a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/traps/deployment.yaml index 2d6732a8a..4e2ad67a2 100644 --- a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -45,6 +45,8 @@ spec: "trap" ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: MONGO_URI diff --git a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml index ffda765e9..eef501339 100644 --- a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml +++ b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/poller/deployment.yaml @@ -45,6 +45,8 @@ spec: "celery", "worker-poller", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -88,6 +90,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 7c140fc52..c996dc576 100644 --- a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -45,6 +45,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -88,6 +90,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index c9b51059b..bfe061e65 100644 --- a/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_redis_ha/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -45,6 +45,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -88,6 +90,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/common/discovery-config.yaml b/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/common/discovery-config.yaml new file mode 100644 index 000000000..8f5acde96 --- /dev/null +++ b/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/common/discovery-config.yaml @@ -0,0 +1,12 @@ +--- +# Source: splunk-connect-for-snmp/templates/common/discovery-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: splunk-connect-for-snmp-discovery-config 
+data: + discovery-config.yaml: | + enabled: false + ipv6Enabled: false + logLevel: INFO + usernameSecrets: [] diff --git a/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/tests/test-connection.yaml b/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/tests/test-connection.yaml index 6851a86ec..dce4a4bf8 100644 --- a/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/tests/test-connection.yaml +++ b/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/tests/test-connection.yaml @@ -31,5 +31,9 @@ spec: memory: 128Mi requests: cpu: 100m - memory: 128Mi + memory: 128Mi + env: + - name: USER + value: "sc4snmp" + restartPolicy: Never diff --git a/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/traps/deployment.yaml b/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/traps/deployment.yaml index 8beee8755..4079ae818 100644 --- a/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/traps/deployment.yaml +++ b/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/traps/deployment.yaml @@ -44,6 +44,8 @@ spec: "trap" ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: MONGO_URI diff --git a/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml b/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml index 18f7815e9..012920ef4 100644 --- a/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml +++ b/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/worker/sender/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-sender", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml b/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml index 4d432158a..1a437570b 100644 --- a/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml +++ b/rendered/manifests/tests_traps_nodeport/splunk-connect-for-snmp/templates/worker/trap/deployment.yaml @@ -44,6 +44,8 @@ spec: "celery", "worker-trap", ] env: + - name: USER + value: "sc4snmp" - name: CONFIG_PATH value: /app/config/config.yaml - name: SC4SNMP_VERSION @@ -74,6 +76,8 @@ spec: value: "true" - name: UDP_CONNECTION_TIMEOUT value: "3" + - name: UDP_CONNECTION_RETRIES + value: "5" - name: MAX_OID_TO_PROCESS value: "70" - name: MAX_REPETITIONS diff --git a/rendered/values.yaml b/rendered/values.yaml index b65c765af..c02fcb391 100644 --- a/rendered/values.yaml +++ b/rendered/values.yaml @@ -36,4 +36,24 @@ scheduler: poller: inventory: | address,port,version,community,secret,security_engine,walk_interval,profiles,smart_profiles,delete - 54.82.41.24,,2c,public,,,1800,IF_profile,false, \ No newline at end of file + 54.82.41.24,,2c,public,,,1800,IF_profile,false, + +discovery: + enabled: true + ipv6Enabled: true + discoveryPath: "/home/devuser/discovery" + autodiscovery: + discovery_version2c: + frequency: 21600 + 
delete_already_discovered: false + network_address: 54.82.41.24/28 + version: "2c" + community: "public" + port: 161 + device_rules: + - name: "Linux servers" + patterns: "*linux*" + group: "linux_group" + - name: "Centos servers" + patterns: "*centos*" + group: "centos_group" diff --git a/rendered/values_only_discovery.yaml b/rendered/values_only_discovery.yaml new file mode 100644 index 000000000..89b2a278e --- /dev/null +++ b/rendered/values_only_discovery.yaml @@ -0,0 +1,34 @@ +discovery: + enabled: true + ipv6Enabled: true + discoveryPath: "/home/devuser/discovery-test2" + usernameSecrets: + - secret + + autodiscovery: + discovery_version2c: + frequency: 21600 + delete_already_discovered: false + network_address: 4.91.99.113/28 + version: "2c" + community: "public" + port: 161 + device_rules: + - name: "Linux servers" + patterns: "*linux*" + group: "linux_group" + - name: "Centos servers" + patterns: "*centos*" + group: "centos_group" + discovery_version3: + frequency: 21600 + delete_already_discovered: false + network_address: 4.91.99.113/28 + version: "3" + port: 161 + secret: secret + security_engine: "80001f8880e761866965756b6800000000" + device_rules: + - name: "Linux VM" + patterns: "*Linux*" + group: "linux_group" \ No newline at end of file diff --git a/splunk_connect_for_snmp/celery_config.py b/splunk_connect_for_snmp/celery_config.py index cffe3eb18..448da159d 100644 --- a/splunk_connect_for_snmp/celery_config.py +++ b/splunk_connect_for_snmp/celery_config.py @@ -95,4 +95,5 @@ Queue("traps", exchange="traps"), Queue("poll", exchange="poll"), Queue("send", exchange="send"), + Queue("discovery", exchange="discovery"), ) diff --git a/splunk_connect_for_snmp/common/base_record.py b/splunk_connect_for_snmp/common/base_record.py new file mode 100644 index 000000000..7dd9f9545 --- /dev/null +++ b/splunk_connect_for_snmp/common/base_record.py @@ -0,0 +1,84 @@ +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
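The BaseRecord model that follows relies on pydantic v1 validator(..., pre=True) hooks to coerce blank CSV cells into defaults. A minimal, self-contained sketch of that pattern; the Record model here is hypothetical and not part of this patch:

from typing import Union

from pydantic import BaseModel, validator


class Record(BaseModel):  # hypothetical model, only to show the pattern
    port: Union[None, int] = 161
    version: Union[None, str] = None

    @validator("port", pre=True)
    def port_default(cls, value):
        # pre=True runs before type coercion, so blank CSV cells can be rescued
        if value is None or (isinstance(value, str) and value.strip() == ""):
            return 161
        return int(value)

    @validator("version", pre=True)
    def version_default(cls, value):
        if value is None or value.strip() == "":
            return "2c"
        return value


print(Record(port="", version="").dict())  # {'port': 161, 'version': '2c'}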
+# +import socket +from ipaddress import ip_address +from typing import Union + +from pydantic import BaseModel, validator + +RecordStr = Union[None, str] +RecordInt = Union[None, int] + + +class BaseRecord(BaseModel): + """Base class for common SNMP record fields""" + + port: RecordInt = 161 + address: RecordStr + version: RecordStr + community: RecordStr + secret: RecordStr + security_engine: RecordStr = "" + + @validator("address", pre=True) + def address_validator(cls, value, values): + if not value: + raise ValueError("field address cannot be null") + if value.startswith("#"): + raise ValueError("field address cannot be commented") + else: + try: + ip_address(value) + except ValueError: + try: + socket.getaddrinfo(value, values["port"]) + except socket.gaierror: + raise ValueError( + f"field address must be an IP or a resolvable hostname {value}" + ) + + return value + + @validator("port", pre=True) + def port_validator(cls, value): + if value is None or (isinstance(value, str) and value.strip() == ""): + return 161 + else: + if not isinstance(value, int): + value = int(value) + if value < 1 or value > 65535: + raise ValueError(f"Port out of range {value}") + return value + + @validator("version", pre=True) + def version_validator(cls, value): + if value is None or value.strip() == "": + return "2c" + else: + if value not in ("1", "2c", "3"): + raise ValueError( + f"version out of range {value} accepted is 1 or 2c or 3" + ) + return value + + @validator("community", "secret", "security_engine", pre=True) + def community_secret_security_engine_validator(cls, value): + if value is None or (isinstance(value, str) and value.strip() == ""): + return None + else: + return value + + def asdict(self) -> dict: + return self.dict() diff --git a/splunk_connect_for_snmp/common/csv_record_manager.py b/splunk_connect_for_snmp/common/csv_record_manager.py new file mode 100644 index 000000000..55dee4728 --- /dev/null +++ b/splunk_connect_for_snmp/common/csv_record_manager.py @@ -0,0 +1,83 @@ +import csv +import os + +from celery.utils.log import get_task_logger + +logger = get_task_logger(__name__) + + +class CSVRecordManager: + def __init__(self, filename): + self.filename = filename + self.columns = [ + "key", + "subnet", + "ip", + "port", + "version", + "group", + "secret", + "community", + ] + + try: + if not os.path.isfile(filename): + with open(filename, mode="w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=self.columns) + writer.writeheader() + self.rows = [] + else: + with open(filename, newline="") as f: + reader = csv.DictReader(f) + self.rows = list(reader) + except Exception as e: + logger.error(f"Error occurred while reading CSV file: {e}") + raise + + def _normalize_row(self, row: dict) -> dict: + """Strip whitespace and ensure all keys exist with empty string defaults.""" + return {k: str(v).strip() if v is not None else "" for k, v in row.items()} + + def _write_to_csv(self): + """Save current rows back to the CSV file.""" + try: + with open(self.filename, mode="w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=self.columns) + writer.writeheader() + writer.writerows(self.rows) + except Exception as e: + logger.error(f"Error occurred while writing CSV: {e}") + raise + + def create_rows(self, inputs, delete_flag): + """Add new rows into the CSV, also replace missing values with empty strings and removes duplicate rows.""" + try: + new_rows = [self._normalize_row(row) for row in inputs] + + # Deduplicate: use tuple of values as unique key + if delete_flag: + 
existing = set() + else: + existing = { + tuple(row[col] for col in self.columns) for row in self.rows + } + for row in new_rows: + key = tuple(row[col] for col in self.columns) + if key not in existing: + self.rows.append(row) + existing.add(key) + + self._write_to_csv() + except Exception as e: + logger.error(f"Error occurred while adding new rows: {e}") + raise + + def delete_rows_by_key(self, key): + """Delete all rows where the 'key' column matches.""" + try: + self.rows = [ + row for row in self.rows if row["key"].strip() != str(key).strip() + ] + except Exception as e: + logger.error(f"Error occurred while deleting row by key: {e}") + raise diff --git a/splunk_connect_for_snmp/common/discovery_record.py b/splunk_connect_for_snmp/common/discovery_record.py new file mode 100644 index 000000000..0470db1b3 --- /dev/null +++ b/splunk_connect_for_snmp/common/discovery_record.py @@ -0,0 +1,71 @@ +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from ipaddress import ip_network +from typing import List, Union + +from pydantic import validator + +from splunk_connect_for_snmp.common.base_record import BaseRecord +from splunk_connect_for_snmp.common.hummanbool import human_bool + +DiscoveryStr = Union[None, str] +DiscoveryInt = Union[None, int] +DiscoveryBool = Union[None, bool] +DiscoveryList = Union[None, List[dict]] + + +class DiscoveryRecord(BaseRecord): + discovery_name: DiscoveryStr + network_address: DiscoveryStr + frequency: DiscoveryInt + delete_already_discovered: DiscoveryBool + device_rules: DiscoveryList + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @validator("network_address", pre=True) + def network_address_validator(cls, value): + if value is None: + raise ValueError("field network_address cannot be null") + else: + try: + ip_network(value, strict=False) + except ValueError: + raise ValueError("field network_address must be a valid subnet") + + return value + + @validator("frequency", pre=True) + def frequency_validator(cls, value): + if value is None: + return 86400 + elif value < 21600: + return 21600 + + return value + + @validator("device_rules", pre=True) + def device_rules_validator(cls, value): + if value is None or (isinstance(value, list) and value == []): + return None + return value + + @validator("delete_already_discovered", pre=True) + def delete_already_discovered_validator(cls, value): + if value is None: + return False + return human_bool(value) diff --git a/splunk_connect_for_snmp/common/inventory_record.py b/splunk_connect_for_snmp/common/inventory_record.py index 3757da153..49013a439 100644 --- a/splunk_connect_for_snmp/common/inventory_record.py +++ b/splunk_connect_for_snmp/common/inventory_record.py @@ -12,14 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License.
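Taken together, the validators in DiscoveryRecord above normalize a raw autodiscovery entry. A quick sketch of the resulting coercions; the values are illustrative, loosely mirroring the rendered values files earlier:

from splunk_connect_for_snmp.common.discovery_record import DiscoveryRecord

record = DiscoveryRecord(
    discovery_name="discovery_version2c",
    network_address="192.0.2.0/28",     # illustrative subnet
    frequency=300,                      # below the floor, coerced up to 21600
    delete_already_discovered=None,     # defaults to False
    device_rules=[],                    # empty list is normalized to None
    version="2c",
    community="public",
    port=161,
)
print(record.frequency, record.delete_already_discovered, record.device_rules)
# 21600 False None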
# -import inspect -import json -import socket -from ipaddress import ip_address from typing import List, Union -from pydantic import BaseModel, validator +from pydantic import validator +from splunk_connect_for_snmp.common.base_record import BaseRecord from splunk_connect_for_snmp.common.hummanbool import human_bool InventoryStr = Union[None, str] @@ -32,13 +29,7 @@ } -class InventoryRecord(BaseModel): - port: InventoryInt = 161 - address: InventoryStr - version: InventoryStr - community: InventoryStr - secret: InventoryStr - security_engine: InventoryStr = "" +class InventoryRecord(BaseRecord): walk_interval: InventoryInt = 42000 profiles: List smart_profiles: InventoryBool @@ -52,55 +43,6 @@ def __init__(self, *args, **kwargs): kwargs.pop(old, None) super().__init__(*args, **kwargs) - @validator("address", pre=True) - def address_validator(cls, value, values): - if value is None: - raise ValueError("field address cannot be null") - if value.startswith("#"): - raise ValueError("field address cannot be commented") - else: - try: - ip_address(value) - except ValueError: - try: - socket.getaddrinfo(value, values["port"]) - except socket.gaierror: - raise ValueError( - f"field address must be an IP or a resolvable hostname {value}" - ) - - return value - - @validator("port", pre=True) - def port_validator(cls, value): - if value is None or (isinstance(value, str) and value.strip() == ""): - return 161 - else: - if not isinstance(value, int): - value = int(value) - - if value < 1 or value > 65535: - raise ValueError(f"Port out of range {value}") - return value - - @validator("version", pre=True) - def version_validator(cls, value): - if value is None or value.strip() == "": - return "2c" - else: - if value not in ("1", "2c", "3"): - raise ValueError( - f"version out of range {value} accepted is 1 or 2c or 3" - ) - return value - - @validator("community", "secret", "security_engine", pre=True) - def community_secret_security_engine_validator(cls, value): - if value is None or (isinstance(value, str) and value.strip() == ""): - return None - else: - return value - @validator("walk_interval", pre=True) def walk_interval_validator(cls, value): if not value: @@ -142,6 +84,3 @@ def group_validator(cls, value): return False else: return value - - def asdict(self) -> dict: - return self.dict() diff --git a/splunk_connect_for_snmp/common/task_generator.py b/splunk_connect_for_snmp/common/task_generator.py index bb2dc59ca..0b7481011 100644 --- a/splunk_connect_for_snmp/common/task_generator.py +++ b/splunk_connect_for_snmp/common/task_generator.py @@ -132,3 +132,28 @@ def run_immediately(self): if self.schedule_period > 300: return True return False + + +class DiscoveryTaskGenerator(TaskGenerator): + def __init__(self, discovery_record, app): + super().__init__( + target=discovery_record.network_address, + schedule_period=discovery_record.frequency, + app=app, + ) + self.discovery_record = discovery_record + self.discovery_name = discovery_record.discovery_name + self.DISCOVERY_CHAIN_OF_TASK = { + "queue": "discovery", + "expires": CHAIN_OF_TASKS_EXPIRY_TIME, + } + + def generate_task_definition(self): + task_data = super().generate_task_definition() + name = f"sc4snmp;{self.discovery_name};discovery" + task_data["name"] = name + task_data["task"] = "splunk_connect_for_snmp.discovery.tasks.discovery" + task_data["run_immediately"] = True + task_data["options"] = self.DISCOVERY_CHAIN_OF_TASK + task_data["kwargs"] = self.discovery_record.dict() + return task_data diff --git 
a/splunk_connect_for_snmp/customtaskmanager.py b/splunk_connect_for_snmp/customtaskmanager.py index f785db0dd..8df3c74d1 100644 --- a/splunk_connect_for_snmp/customtaskmanager.py +++ b/splunk_connect_for_snmp/customtaskmanager.py @@ -57,6 +57,7 @@ def did_expiry_time_change(self, new_expiry_time): if previous_expiry_time is not None and previous_expiry_time != new_expiry_time: self.delete_all_walk_tasks() self.delete_all_poll_tasks() + self.delete_all_discovery_tasks() expiry_time_changed = True return expiry_time_changed @@ -70,6 +71,12 @@ def delete_all_walk_tasks(self): "splunk_connect_for_snmp.snmp.tasks.walk", "delete_all_walk_tasks" ) + def delete_all_discovery_tasks(self): + self.__delete_all_tasks_of_type( + "splunk_connect_for_snmp.discovery.tasks.discovery", + "delete_all_discovery_tasks", + ) + def rerun_all_walks(self): periodic_tasks = RedBeatSchedulerEntry.get_schedules() for periodic_document in periodic_tasks: diff --git a/splunk_connect_for_snmp/discovery/discovery_manager.py b/splunk_connect_for_snmp/discovery/discovery_manager.py new file mode 100644 index 000000000..57f63ea76 --- /dev/null +++ b/splunk_connect_for_snmp/discovery/discovery_manager.py @@ -0,0 +1,244 @@ +import asyncio +import copy +import fnmatch +import ipaddress +import os +import re + +from celery import Task +from celery.utils.log import get_task_logger +from filelock import FileLock +from pysnmp.hlapi.asyncio import ( + ContextData, + ObjectIdentity, + ObjectType, + SnmpEngine, + get_cmd, +) + +from splunk_connect_for_snmp.common.csv_record_manager import CSVRecordManager +from splunk_connect_for_snmp.common.discovery_record import DiscoveryRecord +from splunk_connect_for_snmp.discovery.exceptions import DiscoveryError +from splunk_connect_for_snmp.snmp.auth import get_auth, setup_transport_target + +logger = get_task_logger(__name__) + +DISCOVERY_FOLDER_PATH = os.getenv("DISCOVERY_FOLDER_PATH", "/app/discovery") +DISCOVERY_CSV_PATH = os.path.join(DISCOVERY_FOLDER_PATH, "discovery_devices.csv") +DISCOVERY_LOCK_PATH = os.path.join(DISCOVERY_FOLDER_PATH, "discovery_devices.lock") +DEFAULT_CONCURRENCY = 10 +DEFAULT_GROUP_NAME = "default_group" + + +class Discovery(Task): + def __init__(self): + self.snmp_engine = SnmpEngine() + + def get_host_list(self, subnet): + """Return all host addresses in the subnet.""" + try: + network = ipaddress.ip_network(subnet, strict=False) + return [str(ip) for ip in network.hosts()] + except Exception as e: + err_msg = ( + f"Error occurred while listing hosts for subnet {subnet}: {e}" + ) + raise DiscoveryError(err_msg) + + def find_device_group(self, varbinds, device_rules) -> str: + """ + Find the device group by matching the varbind's value against the device rules. + + :param varbinds: SNMP varbinds.
+ :param device_rules: List of rules with patterns and groups + + :returns: Group name (defaults to DEFAULT_GROUP_NAME if no match) + """ + device_rules_errors = []  # type: ignore + if not isinstance(device_rules, list): + return DEFAULT_GROUP_NAME + + value = varbinds[0][1].prettyPrint() + + for device_rule in device_rules: + try: + pattern = device_rule.get("patterns") + if not pattern: + continue + + regex_pattern = fnmatch.translate(pattern) + if re.search(regex_pattern, value, re.IGNORECASE): + group_name = device_rule.get("group", DEFAULT_GROUP_NAME) + if device_rules_errors: + logger.warning( + f"Invalid device rules encountered: {device_rules_errors}; continuing with {group_name}" + ) + return group_name + except Exception as e: + device_rules_errors.append( + {"device_rule": device_rule, "error": str(e)} + ) + continue + + if device_rules_errors: + logger.warning( + f"Invalid device rules encountered: {device_rules_errors}; continuing with {DEFAULT_GROUP_NAME}" + ) + + return DEFAULT_GROUP_NAME + + async def check_snmp_device( + self, ip, discovery_record: DiscoveryRecord + ) -> dict | None: + """ + Check if an SNMP device responds at the given IP. + :param ip: IP address of the target device. + :param discovery_record: A DiscoveryRecord object. + + :return: A dictionary of device details including the matched group name, or None if the device did not respond. + """ + discovery_record.address = ip + auth_data = await get_auth(logger, discovery_record, SnmpEngine()) + transport_target = await setup_transport_target(discovery_record) + + error_indication, error_status, error_index, var_binds = await get_cmd( + SnmpEngine(), + auth_data, + transport_target, + ContextData(), + ObjectType(ObjectIdentity("SNMPv2-MIB", "sysDescr", 0)), + ) + + if error_indication: + logger.debug(f"SNMP error for {ip}: {error_indication}") + return None + + if error_status != 0: + logger.debug( + f"SNMP error status for {ip}: {error_status} at index {error_index}" + ) + return None + + group_name = self.find_device_group(var_binds, discovery_record.device_rules) + + return { + "key": discovery_record.discovery_name, + "ip": ip, + "subnet": discovery_record.network_address, + "group": group_name, + "version": discovery_record.version, + "port": discovery_record.port, + "secret": discovery_record.secret, + "community": discovery_record.community, + } + + async def _scan(self, semaphore, ip, discovery_record) -> dict | None: + """ + Perform an SNMP check for a single IP address with concurrency control. + + :param semaphore: Shared asyncio.Semaphore bounding concurrent checks. + :param ip: IP address of the device to check. + :param discovery_record: Discovery configuration record. + """ + async with semaphore: + try: + result = await self.check_snmp_device( + ip, copy.deepcopy(discovery_record) + ) + if result: + logger.debug( + f"SNMP device found: {result}. From discovery: {discovery_record.discovery_name}" + ) + return result + except Exception as e: + logger.error(f"SNMP check failed for {ip}: {e}") + return None + + def discover_snmp_devices( + self, ip_list: list[str], discovery_record: DiscoveryRecord + ) -> list[dict[str, str]]: + """ + Synchronous wrapper for the async _discover_snmp_devices_details. + This calls asyncio.run() ONCE per task, creating a single event loop + that handles all SNMP queries concurrently. + + :param ip_list: List of IP addresses to scan + :param discovery_record: Discovery configuration record + + :return list: A list of dictionaries containing discovered device information + """ + return asyncio.run( + self._discover_snmp_devices_details(ip_list, discovery_record) + ) + + async def _discover_snmp_devices_details( + self, ip_list: list[str], discovery_record: DiscoveryRecord + ) -> list[dict[str, str]]: + """ + Scan multiple IPs for SNMP-enabled devices using semaphore-based concurrency, + bounded by DEFAULT_CONCURRENCY. + + :param ip_list: List of IP addresses to scan + :param discovery_record: Discovery configuration record + + :return list: A list of dictionaries containing discovered device information + """ + devices_detail = [] + semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY) + + results = await asyncio.gather( + *[self._scan(semaphore, ip, discovery_record) for ip in ip_list], + return_exceptions=True, + ) + + for idx, result in enumerate(results): + if isinstance(result, BaseException): + logger.error( + f"SNMP check for device {ip_list[idx]} raised an exception: {result}" + ) + continue + elif result: + logger.debug( + f"SNMP device found for {ip_list[idx]}: {result}. Device is from discovery: {discovery_record.discovery_name}" + ) + devices_detail.append(result) + + return devices_detail + + def add_devices_detail_to_csv( + self, snmp_devices_detail, delete_flag, discovery_name + ): + """Persist discovered device details to the shared CSV under a file lock.""" + lock = FileLock(DISCOVERY_LOCK_PATH) + with lock: + csv_service = CSVRecordManager(DISCOVERY_CSV_PATH) + if delete_flag: + csv_service.delete_rows_by_key(discovery_name) + csv_service.create_rows(snmp_devices_detail, delete_flag) + + def do_work(self, discovery_record: DiscoveryRecord) -> list: + try: + logger.info( + f"Starting SNMP discovery for '{discovery_record.discovery_name}' " + f"on subnet {discovery_record.network_address}" + ) + host_list = self.get_host_list(discovery_record.network_address) + logger.info(f"Number of hosts to scan: {len(host_list)}") + + snmp_devices_detail = self.discover_snmp_devices( + host_list, discovery_record + ) + + self.add_devices_detail_to_csv( + snmp_devices_detail, + discovery_record.delete_already_discovered, + discovery_record.discovery_name, + ) + logger.info( + f"SNMP discovery completed for '{discovery_record.discovery_name}'.
" + f"Discovered {len(snmp_devices_detail)} devices" + ) + return snmp_devices_detail + except Exception as e: + raise DiscoveryError( + f"Error occurred while finding SNMP enabled device: {e}" + ) diff --git a/splunk_connect_for_snmp/discovery/exceptions.py b/splunk_connect_for_snmp/discovery/exceptions.py new file mode 100644 index 000000000..40d34f387 --- /dev/null +++ b/splunk_connect_for_snmp/discovery/exceptions.py @@ -0,0 +1,4 @@ +class DiscoveryError(Exception): + """Exception raised for errors produced during execution of SNMP operations/Discovery Operations""" + + pass diff --git a/splunk_connect_for_snmp/discovery/loader.py b/splunk_connect_for_snmp/discovery/loader.py new file mode 100644 index 000000000..98c859f73 --- /dev/null +++ b/splunk_connect_for_snmp/discovery/loader.py @@ -0,0 +1,91 @@ +import ipaddress +import logging +import os +import sys +from contextlib import suppress + +import yaml + +from splunk_connect_for_snmp import customtaskmanager +from splunk_connect_for_snmp.common.customised_json_formatter import ( + CustomisedJSONFormatter, +) +from splunk_connect_for_snmp.common.discovery_record import DiscoveryRecord +from splunk_connect_for_snmp.common.task_generator import DiscoveryTaskGenerator +from splunk_connect_for_snmp.poller import app + +with suppress(ImportError, OSError): + from dotenv import load_dotenv + + load_dotenv() + +DISCOVERY_CONFIG_PATH = os.getenv( + "DISCOVERY_CONFIG_PATH", "/app/discovery/discovery-config.yaml" +) +CHAIN_OF_TASKS_EXPIRY_TIME = os.getenv("CHAIN_OF_TASKS_EXPIRY_TIME", "60") +LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO") + +formatter = CustomisedJSONFormatter() + +logger = logging.getLogger(__name__) +logger.setLevel(LOG_LEVEL) + +# writing to stdout +handler = logging.StreamHandler(sys.stdout) +handler.setLevel(LOG_LEVEL) +handler.setFormatter(formatter) +logger.addHandler(handler) + + +def autodiscovery_task_definition(discovery_record, app): + discovery_definition = DiscoveryTaskGenerator( + discovery_record=discovery_record, app=app + ) + task_config = discovery_definition.generate_task_definition() + return task_config + + +def check_ipv6(subnet): + network = ipaddress.ip_network(subnet, strict=False) + return isinstance(network, ipaddress.IPv6Network) + + +def load(): + try: + with open(DISCOVERY_CONFIG_PATH, encoding="utf-8") as file: + config_runtime = yaml.safe_load(file) + ipv6_enabled = config_runtime.get("ipv6Enabled", False) + autodiscovery = config_runtime.get("autodiscovery", {}) + periodic_obj = customtaskmanager.CustomPeriodicTaskManager() + expiry_time_changed = periodic_obj.did_expiry_time_change( + CHAIN_OF_TASKS_EXPIRY_TIME + ) + if expiry_time_changed: + logger.info( + f"Task expiry time was modified, generating new tasks for discovery" + ) + + for key, value in autodiscovery.items(): + value["discovery_name"] = key + discovery_record = DiscoveryRecord(**value) + is_ipv6 = check_ipv6(value["network_address"]) + if not is_ipv6 or (is_ipv6 and ipv6_enabled): + logger.info(f"Adding the task for {key}") + task_config = autodiscovery_task_definition( + discovery_record=discovery_record, app=app + ) + periodic_obj.manage_task(**task_config) + else: + logger.info( + f"Skipping task for the discovery: {key} because IPv6 is disabled." 
+ ) + return 0 + except Exception as e: + logger.error(f"Error occurred while creating the task: {e}") + raise + + +if __name__ == "__main__": + r = load() + sys.exit(r) diff --git a/splunk_connect_for_snmp/discovery/tasks.py b/splunk_connect_for_snmp/discovery/tasks.py new file mode 100644 index 000000000..812ada91d --- /dev/null +++ b/splunk_connect_for_snmp/discovery/tasks.py @@ -0,0 +1,14 @@ +from celery import shared_task +from celery.utils.log import get_task_logger + +from splunk_connect_for_snmp.common.discovery_record import DiscoveryRecord +from splunk_connect_for_snmp.discovery.discovery_manager import Discovery + +logger = get_task_logger(__name__) + + +@shared_task(bind=True, base=Discovery) +def discovery(self: Discovery, **kwargs) -> dict: + discovery_record = DiscoveryRecord(**kwargs) + result = self.do_work(discovery_record) + return {"snmp_device_details": result} diff --git a/splunk_connect_for_snmp/poller.py b/splunk_connect_for_snmp/poller.py index 099cc3c0b..f9488269f 100644 --- a/splunk_connect_for_snmp/poller.py +++ b/splunk_connect_for_snmp/poller.py @@ -50,5 +50,6 @@ "splunk_connect_for_snmp.inventory", "splunk_connect_for_snmp.snmp", "splunk_connect_for_snmp.splunk", + "splunk_connect_for_snmp.discovery", ] ) diff --git a/splunk_connect_for_snmp/snmp/auth.py b/splunk_connect_for_snmp/snmp/auth.py index cf89bc297..18e58f055 100644 --- a/splunk_connect_for_snmp/snmp/auth.py +++ b/splunk_connect_for_snmp/snmp/auth.py @@ -18,24 +18,25 @@ from ipaddress import ip_address from typing import Any, Dict, Union -from pysnmp.hlapi import ( +from pysnmp.hlapi.asyncio import ( CommunityData, ContextData, SnmpEngine, Udp6TransportTarget, UdpTransportTarget, UsmUserData, - getCmd, + get_cmd, ) from pysnmp.proto.api.v2c import OctetString from pysnmp.smi.rfc1902 import ObjectIdentity, ObjectType +from splunk_connect_for_snmp.common.base_record import BaseRecord from splunk_connect_for_snmp.common.hummanbool import human_bool -from splunk_connect_for_snmp.common.inventory_record import InventoryRecord from splunk_connect_for_snmp.snmp.const import AuthProtocolMap, PrivProtocolMap from splunk_connect_for_snmp.snmp.exceptions import SnmpActionError UDP_CONNECTION_TIMEOUT = int(os.getenv("UDP_CONNECTION_TIMEOUT", 1)) +UDP_CONNECTION_RETRIES = int(os.getenv("UDP_CONNECTION_RETRIES", 5)) IPv6_ENABLED = human_bool(os.getenv("IPv6_ENABLED", "false").lower()) @@ -56,14 +57,14 @@ def get_secret_value( # To discover remote SNMP EngineID we will tap on SNMP engine inner workings # by setting up execution point observer setup on INTERNAL class PDU processing # -def get_security_engine_id(logger, ir: InventoryRecord, snmp_engine: SnmpEngine): +async def get_security_engine_id(logger, br: BaseRecord, snmp_engine: SnmpEngine): observer_context: Dict[Any, Any] = {} - transport_target = setup_transport_target(ir) + transport_target = await setup_transport_target(br) # Register a callback to be invoked at specified execution point of # SNMP Engine and passed local variables at execution point's local scope - snmp_engine.observer.registerObserver( + snmp_engine.observer.register_observer( lambda e, p, v, c: c.update(securityEngineId=v["securityEngineId"]), "rfc3412.prepareDataElements:internal", cbCtx=observer_context, @@ -72,39 +73,43 @@ def get_security_engine_id(logger, ir: InventoryRecord, snmp_engine: SnmpEngine) # Send probe SNMP request with invalid credentials auth_data = UsmUserData("non-existing-user") - error_indication, _, _, _ = next( - getCmd( - snmp_engine, - auth_data, -
transport_target, - ContextData(), - ObjectType(ObjectIdentity("SNMPv2-MIB", "sysDescr", 0)), - ) + error_indication, _, _, _ = await get_cmd( + snmp_engine, + auth_data, + transport_target, + ContextData(), + ObjectType(ObjectIdentity("SNMPv2-MIB", "sysDescr", 0)), ) # See if our SNMP engine received REPORT PDU containing securityEngineId security_engine_id = fetch_security_engine_id( - observer_context, error_indication, ir.address + observer_context, error_indication, br.address ) - logger.debug(f"securityEngineId={security_engine_id} for device {ir.address}") + logger.debug(f"securityEngineId={security_engine_id} for device {br.address}") return security_engine_id -def setup_transport_target(ir): - ip = get_ip_from_socket(ir) if IPv6_ENABLED else ir.address +async def setup_transport_target(br: BaseRecord): + ip = get_ip_from_socket(br) if IPv6_ENABLED else br.address if IPv6_ENABLED and ip_address(ip).version == 6: - return Udp6TransportTarget( - (ir.address, ir.port), timeout=UDP_CONNECTION_TIMEOUT + return await Udp6TransportTarget.create( + (br.address, br.port), + timeout=UDP_CONNECTION_TIMEOUT, + retries=UDP_CONNECTION_RETRIES, ) - return UdpTransportTarget((ir.address, ir.port), timeout=UDP_CONNECTION_TIMEOUT) + return await UdpTransportTarget.create( + (br.address, br.port), + timeout=UDP_CONNECTION_TIMEOUT, + retries=UDP_CONNECTION_RETRIES, + ) -def get_ip_from_socket(ir): +def get_ip_from_socket(br: BaseRecord): # Example of response from getaddrinfo # [(< AddressFamily.AF_INET6: 10 >, < SocketKind.SOCK_STREAM: 1 >, 6, '', ('2607:f8b0:4004:c09::64', 161, 0, 0)), # (< AddressFamily.AF_INET: 2 >, < SocketKind.SOCK_STREAM: 1 >, 6, '', ('142.251.16.139', 161))] - return socket.getaddrinfo(ir.address, ir.port)[0][4][0] + return socket.getaddrinfo(br.address, br.port)[0][4][0] def fetch_security_engine_id(observer_context, error_indication, ipaddress): @@ -116,8 +121,8 @@ def fetch_security_engine_id(observer_context, error_indication, ipaddress): ) -def get_auth_v3(logger, ir: InventoryRecord, snmp_engine: SnmpEngine) -> UsmUserData: - location = os.path.join("secrets/snmpv3", ir.secret) # type: ignore +async def get_auth_v3(logger, br: BaseRecord, snmp_engine: SnmpEngine) -> UsmUserData: + location = os.path.join("secrets/snmpv3", br.secret) # type: ignore if os.path.exists(location): username = get_secret_value(location, "userName", required=True) @@ -140,14 +145,14 @@ def get_auth_v3(logger, ir: InventoryRecord, snmp_engine: SnmpEngine) -> UsmUser get_secret_value(location, "privKeyType", required=False, default="0") ) if ( - isinstance(ir.security_engine, str) - and ir.security_engine != "" - and not ir.security_engine.isdigit() + isinstance(br.security_engine, str) + and br.security_engine != "" + and not br.security_engine.isdigit() ): - security_engine_id = OctetString(hexValue=ir.security_engine) + security_engine_id = OctetString(hexValue=br.security_engine) logger.debug(f"Security eng from profile {security_engine_id}") else: - security_engine_id = get_security_engine_id(logger, ir, snmp_engine) + security_engine_id = await get_security_engine_id(logger, br, snmp_engine) logger.debug(f"Security eng dynamic {security_engine_id}") security_name = None @@ -167,23 +172,23 @@ def get_auth_v3(logger, ir: InventoryRecord, snmp_engine: SnmpEngine) -> UsmUser ) else: - raise FileNotFoundError(f"invalid username from secret {ir.secret}") + raise FileNotFoundError(f"invalid username from secret {br.secret}") -def get_auth_v2c(ir: InventoryRecord) -> CommunityData: - return 
CommunityData(ir.community, mpModel=1) +def get_auth_v2c(br: BaseRecord) -> CommunityData: + return CommunityData(br.community, mpModel=1) -def get_auth_v1(ir: InventoryRecord) -> CommunityData: - return CommunityData(ir.community, mpModel=0) +def get_auth_v1(br: BaseRecord) -> CommunityData: + return CommunityData(br.community, mpModel=0) -def get_auth( - logger, ir: InventoryRecord, snmp_engine: SnmpEngine +async def get_auth( + logger, br: BaseRecord, snmp_engine: SnmpEngine ) -> Union[UsmUserData, CommunityData]: - if ir.version == "1": - return get_auth_v1(ir) - elif ir.version == "2c": - return get_auth_v2c(ir) + if br.version == "1": + return get_auth_v1(br) + elif br.version == "2c": + return get_auth_v2c(br) else: - return get_auth_v3(logger, ir, snmp_engine) + return await get_auth_v3(logger, br, snmp_engine) diff --git a/splunk_connect_for_snmp/snmp/const.py b/splunk_connect_for_snmp/snmp/const.py index 2f1bcc3a5..979642352 100644 --- a/splunk_connect_for_snmp/snmp/const.py +++ b/splunk_connect_for_snmp/snmp/const.py @@ -16,25 +16,25 @@ from pysnmp.entity import config AuthProtocolMap = { - "MD5": config.usmHMACMD5AuthProtocol, - "SHA": config.usmHMACSHAAuthProtocol, - "SHA224": config.usmHMAC128SHA224AuthProtocol, - "SHA256": config.usmHMAC192SHA256AuthProtocol, - "SHA384": config.usmHMAC256SHA384AuthProtocol, - "SHA512": config.usmHMAC384SHA512AuthProtocol, - "NONE": config.usmNoAuthProtocol, + "MD5": config.USM_AUTH_HMAC96_MD5, + "SHA": config.USM_AUTH_HMAC96_SHA, + "SHA224": config.USM_AUTH_HMAC128_SHA224, + "SHA256": config.USM_AUTH_HMAC192_SHA256, + "SHA384": config.USM_AUTH_HMAC256_SHA384, + "SHA512": config.USM_AUTH_HMAC384_SHA512, + "NONE": config.USM_AUTH_NONE, } PrivProtocolMap = { - "DES": config.usmDESPrivProtocol, - "3DES": config.usm3DESEDEPrivProtocol, - "AES": config.usmAesCfb128Protocol, - "AES128": config.usmAesCfb128Protocol, - "AES192": config.usmAesCfb192Protocol, - "AES192BLMT": config.usmAesBlumenthalCfb192Protocol, - "AES256": config.usmAesCfb256Protocol, - "AES256BLMT": config.usmAesBlumenthalCfb256Protocol, - "NONE": config.usmNoPrivProtocol, + "DES": config.USM_PRIV_CBC56_DES, + "3DES": config.USM_PRIV_CBC168_3DES, + "AES": config.USM_PRIV_CFB128_AES, + "AES128": config.USM_PRIV_CFB128_AES, + "AES192": config.USM_PRIV_CFB192_AES, + "AES192BLMT": config.USM_PRIV_CFB192_AES_BLUMENTHAL, + "AES256": config.USM_PRIV_CFB256_AES, + "AES256BLMT": config.USM_PRIV_CFB256_AES_BLUMENTHAL, + "NONE": config.USM_PRIV_NONE, } DEFAULT_POLLING_FREQUENCY = 60 diff --git a/splunk_connect_for_snmp/snmp/context.py b/splunk_connect_for_snmp/snmp/context.py index 1e1dc0d0d..ca6d61e5a 100644 --- a/splunk_connect_for_snmp/snmp/context.py +++ b/splunk_connect_for_snmp/snmp/context.py @@ -14,8 +14,8 @@ # limitations under the License. 
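The AuthProtocolMap/PrivProtocolMap renames above pin the code to the pysnmp 7.x constant names. If a deployment had to tolerate both generations, one option is a defensive lookup along these lines; this helper is a sketch, not part of the patch, and lists only a few of the aliases:

from pysnmp.entity import config

# Old (pysnmp <= 6) and new (pysnmp >= 7) attribute names for the same protocols,
# taken from the removed/added lines above.
AUTH_ALIASES = {
    "MD5": ("USM_AUTH_HMAC96_MD5", "usmHMACMD5AuthProtocol"),
    "SHA": ("USM_AUTH_HMAC96_SHA", "usmHMACSHAAuthProtocol"),
    "NONE": ("USM_AUTH_NONE", "usmNoAuthProtocol"),
}


def resolve_auth_protocol(name: str):
    for attr in AUTH_ALIASES[name.upper()]:
        if hasattr(config, attr):
            return getattr(config, attr)
    raise KeyError(f"no auth protocol constant available for {name}")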
# -from pysnmp.hlapi import ContextData +from pysnmp.hlapi.asyncio import ContextData def get_context_data() -> ContextData: - return ContextData(None, "") + return ContextData(contextEngineId=None, contextName=b"") diff --git a/splunk_connect_for_snmp/snmp/manager.py b/splunk_connect_for_snmp/snmp/manager.py index 60508bf1f..17fb7af0d 100644 --- a/splunk_connect_for_snmp/snmp/manager.py +++ b/splunk_connect_for_snmp/snmp/manager.py @@ -17,7 +17,6 @@ from contextlib import suppress from pysnmp.proto.errind import EmptyResponse -from pysnmp.smi import error from requests import Session from splunk_connect_for_snmp.common.collection_manager import ProfilesManager @@ -38,7 +37,7 @@ import pymongo from celery import Task from celery.utils.log import get_task_logger -from pysnmp.hlapi import SnmpEngine, bulkCmd, getCmd +from pysnmp.hlapi.asyncio import SnmpEngine, get_cmd from pysnmp.smi import compiler, view from pysnmp.smi.rfc1902 import ObjectIdentity, ObjectType from requests_cache import MongoCache @@ -49,6 +48,7 @@ from splunk_connect_for_snmp.snmp.auth import get_auth, setup_transport_target from splunk_connect_for_snmp.snmp.context import get_context_data from splunk_connect_for_snmp.snmp.exceptions import SnmpActionError +from splunk_connect_for_snmp.snmp.multi_bulk_walk_cmd import multi_bulk_walk_cmd MIB_SOURCES = os.getenv("MIB_SOURCES", "https://pysnmp.github.io/mibs/asn1/@mib@") MIB_INDEX = os.getenv("MIB_INDEX", "https://pysnmp.github.io/mibs/index.csv") @@ -126,21 +126,23 @@ def get_inventory(mongo_inventory, address): def _any_failure_happened( - error_indication, error_status, error_index, varbinds: list, address, walk + error_indication, error_status, error_index, varbinds: tuple, address, is_walk ) -> bool: """ This function checks if any failure happened during GET or BULK operation. - @param error_indication: - @param error_status: - @param error_index: index of varbind where error appeared - @param varbinds: list of varbinds - @return: if any failure happened + :param error_indication: True value indicates SNMP engine error. + :param error_status: True value indicates SNMP PDU error. + :param error_index: Non-zero value refers to varBinds[errorIndex-1]. + :param varbinds: a sequential tuple of varbinds + :param address: IP address. 
+ :param is_walk: whether this request is part of a walk operation + :return: False if no failure happened; raises SnmpActionError otherwise """ if error_indication: if isinstance(error_indication, EmptyResponse) and IGNORE_EMPTY_VARBINDS: return False raise SnmpActionError( - f"An error of SNMP isWalk={walk} for a host {address} occurred: {error_indication}" + f"An error of SNMP isWalk={is_walk} for a host {address} occurred: {error_indication}" ) elif error_status: result = "{} at {}".format( @@ -148,7 +150,7 @@ def _any_failure_happened( error_index and varbinds[int(error_index) - 1][0] or "?", ) raise SnmpActionError( - f"An error of SNMP isWalk={walk} for a host {address} occurred: {result}" + f"An error of SNMP isWalk={is_walk} for a host {address} occurred: {result}" ) return False @@ -293,13 +295,13 @@ def __init__(self, **kwargs): self.last_modified = time.time() self.snmpEngine = SnmpEngine() self.already_loaded_mibs = set() - self.builder = self.snmpEngine.getMibBuilder() + self.builder = self.snmpEngine.get_mib_builder() self.mib_view_controller = view.MibViewController(self.builder) - compiler.addMibCompiler(self.builder, sources=[MIB_SOURCES]) + compiler.add_mib_compiler(self.builder, sources=[MIB_SOURCES]) for mib in DEFAULT_STANDARD_MIBS: self.standard_mibs.append(mib) - self.builder.loadModules(mib) + self.builder.load_modules(mib) mib_response = self.session.get(f"{MIB_INDEX}") self.mib_map = {} @@ -312,33 +314,57 @@ def __init__(self, **kwargs): logger.debug(f"Loaded {len(self.mib_map.keys())} mib map entries") else: logger.error( - f"Unable to load mib map from index http error {self.mib_response.status_code}" + f"Unable to load mib map from index http error {mib_response.status_code}" ) - def do_work( + def get_snmp_engine(self) -> SnmpEngine: + """ + :returns: The SnmpEngine with mibViewController cache attached. + """ + snmp_engine = SnmpEngine() + snmp_engine.cache["mibViewController"] = self.mib_view_controller + return snmp_engine + + async def do_work( self, ir: InventoryRecord, - walk: bool = False, + is_walk: bool = False, profiles: Union[List[str], None] = None, ): + """ + ## NOTE + - When a task arriving at the poll queue starts with a fresh SnmpEngine (which has no transport_dispatcher + attached), SNMP requests (get_cmd, bulk_walk_cmd, or any other) run normally. + - If a later task finds that the SnmpEngine already has a transport_dispatcher, it reuses that + transport_dispatcher, which causes SNMP requests to hang indefinitely. + - If this hang occurs, then as per our Celery configuration, any task that + remains in the queue longer than the default 2400s is terminated at the + hard time limit and discarded. + - The issue does not always appear on the very next task; it may happen + on the second, third, or any subsequent task, depending on timing and + concurrency. + + The reliable way to eliminate this hang is to use a new SnmpEngine for each SNMP request.
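To make the fresh-engine rule concrete, a minimal sketch of the per-request pattern used across this patch; the host address and community string are placeholders:

import asyncio

from pysnmp.hlapi.asyncio import (
    CommunityData,
    ContextData,
    ObjectIdentity,
    ObjectType,
    SnmpEngine,
    UdpTransportTarget,
    get_cmd,
)


async def fetch_sysdescr(host: str) -> str:
    engine = SnmpEngine()  # fresh engine per request: no stale transport_dispatcher to inherit
    target = await UdpTransportTarget.create((host, 161), timeout=3, retries=5)
    error_indication, error_status, _, var_binds = await get_cmd(
        engine,
        CommunityData("public", mpModel=1),  # placeholder v2c community
        target,
        ContextData(),
        ObjectType(ObjectIdentity("SNMPv2-MIB", "sysDescr", 0)),
    )
    if error_indication or error_status:
        raise RuntimeError(f"SNMP request failed: {error_indication or error_status}")
    return var_binds[0][1].prettyPrint()


# asyncio.run(fetch_sysdescr("192.0.2.10"))  # placeholder address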
+ + """ retry = False address = transform_address_to_key(ir.address, ir.port) logger.info(f"Preparing task for {ir.address}") - if time.time() - self.last_modified > PROFILES_RELOAD_DELAY or walk: + if time.time() - self.last_modified > PROFILES_RELOAD_DELAY or is_walk: self.profiles = self.profiles_manager.return_collection() self.profiles_collection.update(self.profiles) self.last_modified = time.time() logger.debug("Profiles reloaded") varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = self.get_varbinds( - address, walk=walk, profiles=profiles + address, is_walk=is_walk, profiles=profiles ) - auth_data = get_auth(logger, ir, self.snmpEngine) + auth_data = await get_auth(logger, ir, self.snmpEngine) context_data = get_context_data() - transport = setup_transport_target(ir) + transport = await setup_transport_target(ir) metrics: Dict[str, Any] = {} if not varbinds_get and not varbinds_bulk: @@ -346,7 +372,7 @@ def do_work( return False, {} if varbinds_bulk: - self.run_bulk_request( + await self.run_bulk_request( address, auth_data, bulk_mapping, @@ -355,11 +381,11 @@ def do_work( metrics, transport, varbinds_bulk, - walk, + is_walk, ) if varbinds_get: - self.run_get_request( + await self.run_get_request( address, auth_data, context_data, @@ -368,7 +394,7 @@ def do_work( metrics, transport, varbinds_get, - walk, + is_walk, ) for group_key, metric in metrics.items(): @@ -379,57 +405,94 @@ def do_work( return retry, metrics - def run_get_request( + async def run_get_request( self, address, auth_data, context_data, get_mapping, - ir, - metrics, + ir: InventoryRecord, + metrics: dict, transport, varbinds_get, - walk, + is_walk, ): + snmp_engine = self.get_snmp_engine() # some devices cannot process more OID than X, so it is necessary to divide it on chunks for varbind_chunk in self.get_varbind_chunk(varbinds_get, MAX_OID_TO_PROCESS): - for ( + try: + (error_indication, error_status, error_index, varbind_table) = ( + await get_cmd( + snmp_engine, + auth_data, + transport, + context_data, + *varbind_chunk, + lookupMib=True, + ) + ) + except Exception as e: + logger.exception(f"Error while performing get_cmd: {e}") + continue + + if not _any_failure_happened( error_indication, error_status, error_index, varbind_table, - ) in getCmd( - self.snmpEngine, auth_data, transport, context_data, *varbind_chunk + ir.address, + is_walk, ): - if not _any_failure_happened( - error_indication, - error_status, - error_index, - varbind_table, - ir.address, - walk, - ): - self.process_snmp_data(varbind_table, metrics, address, get_mapping) - - def run_bulk_request( + self.process_snmp_data(varbind_table, metrics, address, get_mapping) + + async def run_bulk_request( self, address, auth_data, bulk_mapping, context_data, - ir, - metrics, + ir: InventoryRecord, + metrics: dict, transport, varbinds_bulk, - walk, + is_walk, ): - for ( + """ + Perform asynchronous SNMP BULK requests on multiple varbinds simultaneously. + + This function uses multi_bulk_walk_cmd to walk multiple SNMP varbinds in parallel + within a single SNMP session, reducing network roundtrips and improving performance. 
+ + :param address: IP address of the SNMP device to query + :param auth_data: SNMP authentication data + :param bulk_mapping: mapping dictionary to process SNMP metrics + :param context_data: SNMP ContextData object + :param ir: object containing SNMP device info + :param metrics: dictionary to store metrics collected from SNMP responses + :param transport: SNMP transport target + :param varbinds_bulk: set of SNMP varbinds to query in parallel + :param is_walk: boolean flag indicating if it is a walk operation + + :return: None + + ## NOTE + - The current `bulkCmd` of PySNMP does not support the `lexicographicMode` option. + As a result, the walk is not strictly confined to the requested varBind subtree and may go beyond the requested OID subtree, + with a high chance of duplicate OIDs. + - Uses custom `multi_bulk_walk_cmd` which walks multiple varbinds simultaneously + - Each varbind respects `lexicographicMode=False` independently + - Each varbind walks only its own OID subtree and stops at its boundary + - Prevents walking beyond requested subtrees and eliminates duplicate OIDs + """ + snmp_engine = self.get_snmp_engine() + + async for ( error_indication, error_status, error_index, varbind_table, - ) in bulkCmd( - self.snmpEngine, + ) in multi_bulk_walk_cmd( + snmp_engine, auth_data, transport, context_data, @@ -437,6 +500,7 @@ def run_bulk_request( MAX_REPETITIONS, *varbinds_bulk, lexicographicMode=False, + lookupMib=True, ignoreNonIncreasingOid=is_increasing_oids_ignored(ir.address, ir.port), ): if not _any_failure_happened( @@ -445,7 +509,7 @@ def run_bulk_request( error_index, varbind_table, ir.address, - walk, + is_walk, ): _, tmp_mibs, _ = self.process_snmp_data( varbind_table, metrics, address, bulk_mapping @@ -465,7 +529,7 @@ def load_mibs(self, mibs: List[str]) -> None: for mib in mibs: if mib: try: - self.builder.loadModules(mib) + self.builder.load_modules(mib) except Exception as e: logger.warning(f"Error loading mib for {mib}, {e}") @@ -482,17 +546,17 @@ def is_mib_known(self, id: str, oid: str, target: str) -> Tuple[bool, str]: logger.warning(f"no mib found {id} based on {oid} from {target}") return False, "" - def get_varbinds(self, address, walk=False, profiles=[]): + def get_varbinds(self, address, is_walk=False, profiles=[]): varbinds_bulk = set() varbinds_get = set() get_mapping = {} bulk_mapping = {} - if walk and not profiles: + if is_walk and not profiles: varbinds_bulk.add(ObjectType(ObjectIdentity("1.3.6"))) return varbinds_get, get_mapping, varbinds_bulk, bulk_mapping joined_profile_object = self.profiles_collection.get_polling_info_from_profiles( - profiles, walk + profiles, is_walk ) if joined_profile_object: mib_families = joined_profile_object.get_mib_families() @@ -631,7 +695,13 @@ def handle_groupkey_without_metrics(self, group_key, index, mapping, metrics): metrics[group_key]["profiles"] = [] def init_snmp_data(self, varbind): - mib, metric, index = varbind[0].getMibSymbol() + """ + Extract SNMP varbind information. 
+ :param varbind: ObjectType + + :return: index, metric, mib, oid, and varbind_id + """ + oid = str(varbind[0].get_oid()) varbind_id = varbind[0].prettyPrint() - oid = str(varbind[0].getOid()) + mib, metric, index = varbind[0].get_mib_symbol() return index, metric, mib, oid, varbind_id diff --git a/splunk_connect_for_snmp/snmp/multi_bulk_walk_cmd.py b/splunk_connect_for_snmp/snmp/multi_bulk_walk_cmd.py new file mode 100644 index 000000000..34ffc10a8 --- /dev/null +++ b/splunk_connect_for_snmp/snmp/multi_bulk_walk_cmd.py @@ -0,0 +1,328 @@ +# Copyright 2021 Splunk Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import AsyncGenerator + +from pysnmp.entity.engine import SnmpEngine +from pysnmp.hlapi import varbinds +from pysnmp.hlapi.v3arch.asyncio.auth import CommunityData, UsmUserData +from pysnmp.hlapi.v3arch.asyncio.cmdgen import bulk_cmd +from pysnmp.hlapi.v3arch.asyncio.context import ContextData +from pysnmp.hlapi.v3arch.asyncio.lcd import CommandGeneratorLcdConfigurator +from pysnmp.hlapi.v3arch.asyncio.transport import AbstractTransportTarget +from pysnmp.proto import errind +from pysnmp.proto.rfc1902 import Integer32, Null +from pysnmp.proto.rfc1905 import EndOfMibView, endOfMibView +from pysnmp.smi.rfc1902 import ObjectIdentity, ObjectType + +VB_PROCESSOR = varbinds.CommandGeneratorVarBinds() +LCD = CommandGeneratorLcdConfigurator() +is_end_of_mib = varbinds.is_end_of_mib + + +async def multi_bulk_walk_cmd( + snmpEngine: SnmpEngine, + authData: "CommunityData | UsmUserData", + transportTarget: AbstractTransportTarget, + contextData: ContextData, + nonRepeaters: int, + maxRepetitions: int, + *varBinds: ObjectType, + **options, +) -> AsyncGenerator[ + "tuple[errind.ErrorIndication | None, Integer32 | str | int | None, Integer32 | int | None, tuple[ObjectType, ...]]", + None, +]: + r"""Creates a generator to perform SNMP GETBULK queries with multiple OID trees. + + This function extends the bulk_walk_cmd functionality to support walking multiple + OID trees simultaneously while respecting lexicographic mode for each varbind + independently. Each varbind is tracked separately and will stop iteration when + it reaches its natural boundary or goes out of scope. + + On each iteration, a new SNMP GETBULK request is sent (:RFC:`1905#section-4.2.3`). + The iterator blocks waiting for a response to arrive or an error to occur. Unlike + bulk_walk_cmd, which only handles a single varbind, this function can handle + multiple varbinds in parallel, applying lexicographic mode filtering to each. + + Parameters + ---------- + snmpEngine : :py:class:`~pysnmp.hlapi.v3arch.asyncio.SnmpEngine` + Class instance representing SNMP engine. + + authData : :py:class:`~pysnmp.hlapi.v3arch.asyncio.CommunityData` or :py:class:`~pysnmp.hlapi.v3arch.asyncio.UsmUserData` + Class instance representing SNMP credentials.
+ + transportTarget : :py:class:`~pysnmp.hlapi.v3arch.asyncio.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.v3arch.asyncio.Udp6TransportTarget` + Class instance representing transport type along with SNMP peer address. + + contextData : :py:class:`~pysnmp.hlapi.v3arch.asyncio.ContextData` + Class instance representing SNMP ContextEngineId and ContextName values. + + nonRepeaters : int + One MIB variable is requested in response for the first + `nonRepeaters` MIB variables in request. + + maxRepetitions : int + `maxRepetitions` MIB variables are requested in response for each + of the remaining MIB variables in the request (i.e. excluding + `nonRepeaters`). Remote SNMP engine may choose a lesser value than + requested. + + *varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType` + One or more class instances representing MIB variables to place + into SNMP request. Each varbind represents a separate OID tree to walk. + + Other Parameters + ---------------- + \*\*options : + Request options: + + * `lookupMib` - load MIB and resolve response MIB variables at + the cost of slightly reduced performance. Default is `True`. + * `lexicographicMode` - walk SNMP agent's MIB till the end (if `True`), + otherwise (if `False`) stop iteration when all response MIB + variables leave the scope of initial MIB variables in + `varBinds`. Default is `True`. + * `ignoreNonIncreasingOid` - continue iteration even if response + MIB variables (OIDs) are not greater than request MIB variables. + Be aware that setting it to `True` may cause an infinite loop between + SNMP management and agent applications. Default is `False`. + * `maxRows` - stop iteration once this generator instance processed + `maxRows` of SNMP conceptual table. Default is `0` (no limit). + * `maxCalls` - stop iteration once this generator instance processed + `maxCalls` responses. Default is 0 (no limit). + + Yields + ------ + errorIndication : str + True value indicates SNMP engine error. + errorStatus : str + True value indicates SNMP PDU error. + errorIndex : int + Non-zero value refers to varBinds[errorIndex-1] + varBinds : tuple + A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class + instances representing MIB variables returned in SNMP response. + Contains all valid OIDs from the current GETBULK response batch. + + Raises + ------ + PySnmpError + Or its derivative indicating that an error occurred while + performing SNMP operation. + + Notes + ----- + Key Behavioral Differences from bulk_walk_cmd: + + - Supports multiple varbinds simultaneously: the generator can walk multiple + OID subtrees in parallel within a single SNMP session. + - Each varbind is independently tracked: completion, last OID, and + lexicographic boundaries are maintained per varbind. + - lexicographicMode is applied per-varbind: a varbind stops only when its + own scope is exhausted if lexicographicMode=False. Other varbinds continue. + - Can optionally ignore non-increasing OIDs: if ignoreNonIncreasingOid=True, + the generator continues even when the agent returns OIDs that are not + greater than previous ones. + - Returns all valid responses from each GETBULK call in a single yield: all + active varbinds' results are grouped together in the same batch. + - Respects maxRows and maxCalls limits across all varbinds combined. + - Handles timeouts gracefully: the generator continues unless the error + is non-recoverable.
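For orientation, a usage sketch of this generator; the engine, auth, transport, and context arguments are assumed to be prepared via get_auth, setup_transport_target, and get_context_data as elsewhere in this patch, and the two OIDs are arbitrary examples:

from pysnmp.smi.rfc1902 import ObjectIdentity, ObjectType

from splunk_connect_for_snmp.snmp.multi_bulk_walk_cmd import multi_bulk_walk_cmd


async def walk_two_subtrees(snmp_engine, auth_data, transport, context_data):
    subtrees = [
        ObjectType(ObjectIdentity("1.3.6.1.2.1.1")),    # system
        ObjectType(ObjectIdentity("1.3.6.1.2.1.2.2")),  # ifTable
    ]
    async for error_indication, error_status, error_index, varbind_table in multi_bulk_walk_cmd(
        snmp_engine,
        auth_data,
        transport,
        context_data,
        0,   # nonRepeaters
        25,  # maxRepetitions
        *subtrees,
        lexicographicMode=False,  # each subtree stops at its own boundary
    ):
        if error_indication or error_status:
            break
        for name, value in varbind_table:
            print(name.prettyPrint(), value.prettyPrint())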
+ + """ + lexicographicMode = options.get("lexicographicMode", True) + ignoreNonIncreasingOid = options.get("ignoreNonIncreasingOid", False) + maxRows = options.get("maxRows", 0) + maxCalls = options.get("maxCalls", 0) + + initialVars = [x[0] for x in VB_PROCESSOR.make_varbinds(snmpEngine.cache, varBinds)] + num_varbinds = len(initialVars) + + # Track state for each varbind independently + completed = [False] * num_varbinds + + # Track last seen OID per varbind (used for next bulk_cmd request) + last_oids = [ + vb[0] if isinstance(vb[0], ObjectIdentity) else ObjectIdentity(vb[0]) + for vb in varBinds + ] + + totalRows = 0 + totalCalls = 0 + + while True: + + # Used to keep valid varbinds to yield. + valid_results = [] + if varBinds: + if maxRows and totalRows < maxRows: + maxRepetitions = min(maxRepetitions, maxRows - totalRows) + + # Build request varbinds from currently active (non-completed) varbinds + active_indices = [i for i in range(num_varbinds) if not completed[i]] + + if not active_indices: + return + + # Create request varbinds using last known OIDs for active varbinds + request_varbinds = [ + ObjectType( + ( + last_oids[i] + if isinstance(last_oids[i], ObjectIdentity) + else ObjectIdentity(last_oids[i]) + ), + Null(""), + ) + for i in active_indices + ] + + errorIndication, errorStatus, errorIndex, varBindTable = await bulk_cmd( + snmpEngine, + authData, + transportTarget, + contextData, + nonRepeaters, + maxRepetitions, + *request_varbinds, + **dict(lookupMib=options.get("lookupMib", True)), + ) + + if ( + ignoreNonIncreasingOid + and errorIndication + and isinstance(errorIndication, errind.OidNotIncreasing) + ): + errorIndication = None + + if errorIndication: + yield ( + errorIndication, + errorStatus, + errorIndex, + varBindTable and tuple(varBindTable) or (), + ) + if errorIndication != errind.requestTimedOut: + return + elif errorStatus: + # SNMP PDU errors from agent + if errorStatus == 2: + # Hide SNMPv1 noSuchName error which leaks in here + # from SNMPv1 Agent through internal pysnmp proxy + errorStatus = 0 + errorIndex = 0 + + yield ( + errorIndication, + errorStatus, + errorIndex, + tuple(ObjectType(last_oids[i], Null("")) for i in active_indices), + ) + return + else: + if not varBindTable: + return + + num_active = len(active_indices) + stopFlag = True + + for idx, response_vb in enumerate(varBindTable): + active_vb_idx = idx % num_active + original_idx = active_indices[active_vb_idx] + name, val = response_vb + + # Check if beyond initial scope (when lexicographicMode=False) + foundEnding = isinstance(val, (Null, EndOfMibView)) + foundBeyond = not lexicographicMode and not initialVars[ + original_idx + ].isPrefixOf(name) + + is_end_of_mib_val = val is endOfMibView + + if foundEnding or foundBeyond or is_end_of_mib_val: + completed[original_idx] = True + continue + + # Valid response - add to results + valid_results.append(response_vb) + last_oids[original_idx] = name + stopFlag = False + + totalRows += len(valid_results) + totalCalls += 1 + + # If stopFlag is True, all varbinds have completed + if stopFlag: + # Check if we have any final results to yield + if valid_results: + yield ( + errorIndication, + errorStatus, + errorIndex, + tuple(valid_results), + ) + return + + # If no valid results, stop walking + if not valid_results: + return + + if maxRows and totalRows > maxRows: + excess = totalRows - maxRows + keep_count = len(valid_results) - excess + + if keep_count > 0: + valid_results = valid_results[:keep_count] + totalRows = maxRows + else: + # This entire 
batch exceeds the limit + return + + else: + errorIndication = None + errorStatus = None + errorIndex = None + varBinds = () + + # Yield all collected valid varBinds for this batch + initialVarBinds: "tuple[ObjectType, ...]|None" = ( + yield errorIndication, + errorStatus, + errorIndex, + tuple(valid_results), + ) + + if initialVarBinds: + num_new = len(initialVarBinds) + + new_initial_vars = [ + x[0] + for x in VB_PROCESSOR.make_varbinds(snmpEngine.cache, initialVarBinds) + ] + + initialVars = new_initial_vars + num_varbinds = num_new + completed = [False] * num_varbinds + last_oids = [vb[0] for vb in initialVarBinds] + + if maxRows and totalRows >= maxRows: + return + + if maxCalls and totalCalls >= maxCalls: + return + + if all(completed): + return diff --git a/splunk_connect_for_snmp/snmp/tasks.py b/splunk_connect_for_snmp/snmp/tasks.py index 03bc2d238..ede4eb9a0 100644 --- a/splunk_connect_for_snmp/snmp/tasks.py +++ b/splunk_connect_for_snmp/snmp/tasks.py @@ -15,6 +15,7 @@ # import logging import re +from asyncio import run from contextlib import suppress from pysnmp.smi.error import SmiError @@ -55,22 +56,7 @@ IPv6_ENABLED = human_bool(os.getenv("IPv6_ENABLED", "false").lower()) -@shared_task( - bind=True, - base=Poller, - retry_backoff=30, - retry_backoff_max=WALK_RETRY_MAX_INTERVAL, - max_retries=WALK_MAX_RETRIES, - autoretry_for=( - MongoLockLocked, - SnmpActionError, - ), - throws=( - SnmpActionError, - SnmpActionError, - ), -) -def walk(self, **kwargs): +async def walk_async_wrapper(self: Poller, **kwargs): address = kwargs["address"] profile = kwargs.get("profile", []) group = kwargs.get("group") @@ -84,7 +70,7 @@ def walk(self, **kwargs): ir = get_inventory(mongo_inventory, address) retry = True while retry: - retry, result = self.do_work(ir, walk=True, profiles=profile) + retry, result = await self.do_work(ir, is_walk=True, profiles=profile) # After a Walk tell schedule to recalc work = { @@ -102,15 +88,23 @@ def walk(self, **kwargs): @shared_task( bind=True, base=Poller, - default_retry_delay=5, - max_retries=3, - retry_backoff=True, - retry_backoff_max=1, - retry_jitter=True, - expires=30, + retry_backoff=30, + retry_backoff_max=WALK_RETRY_MAX_INTERVAL, + max_retries=WALK_MAX_RETRIES, + autoretry_for=( + MongoLockLocked, + SnmpActionError, + ), + throws=( + MongoLockLocked, + SnmpActionError, + ), ) -def poll(self, **kwargs): +def walk(self, **kwargs): + return run(walk_async_wrapper(self, **kwargs)) + +async def poll_async_wrapper(self: Poller, **kwargs): address = kwargs["address"] profiles = kwargs["profiles"] group = kwargs.get("group") @@ -119,7 +113,7 @@ def poll(self, **kwargs): mongo_inventory = mongo_db.inventory ir = get_inventory(mongo_inventory, address) - _, result = self.do_work(ir, profiles=profiles) + _, result = await self.do_work(ir, profiles=profiles) # After a Walk tell schedule to recalc work = { @@ -135,6 +129,20 @@ def poll(self, **kwargs): return work +@shared_task( + bind=True, + base=Poller, + default_retry_delay=5, + max_retries=3, + retry_backoff=True, + retry_backoff_max=1, + retry_jitter=True, + expires=30, +) +def poll(self, **kwargs): + return run(poll_async_wrapper(self, **kwargs)) + + @ttl_lru_cache(maxsize=MAX_DNS_CACHE_SIZE_TRAPS, ttl=TTL_DNS_CACHE_TRAPS) def resolve_address(address: str): try: @@ -147,9 +155,9 @@ def resolve_address(address: str): @shared_task(bind=True, base=Poller) -def trap(self, work): - varbind_table, not_translated_oids, remaining_oids, remotemibs = [], [], [], set() - metrics = {} +def trap(self: Poller, work): + 
varbind_table, not_translated_oids, remaining_oids, remotemibs = [], [], [], set() # type: ignore + metrics = {} # type: ignore work["host"] = format_ipv4_address(work["host"]) _process_work_data(self, work, varbind_table, not_translated_oids) @@ -169,7 +177,7 @@ def trap(self, work): return _build_result(result, work["host"], fields) -def _process_work_data(self, work, varbind_table, not_translated_oids): +def _process_work_data(self: Poller, work, varbind_table, not_translated_oids): """Process the data in work to populate varbinds.""" for w in work["data"]: if OID_VALIDATOR.match(w[1]): @@ -177,7 +185,7 @@ def _process_work_data(self, work, varbind_table, not_translated_oids): try: varbind_table.append( - ObjectType(ObjectIdentity(w[0]), w[1]).resolveWithMib( + ObjectType(ObjectIdentity(w[0]), w[1]).resolve_with_mib( self.mib_view_controller ) ) @@ -185,7 +193,7 @@ def _process_work_data(self, work, varbind_table, not_translated_oids): not_translated_oids.append((w[0], w[1])) -def _load_mib_if_needed(self, oid, host): +def _load_mib_if_needed(self: Poller, oid, host): """Load the MIB if it is known and not already loaded.""" with suppress(Exception): found, mib = self.is_mib_known(oid, oid, host) @@ -195,7 +203,7 @@ def _load_mib_if_needed(self, oid, host): def _process_remaining_oids( - self, not_translated_oids, remotemibs, remaining_oids, host, varbind_table + self: Poller, not_translated_oids, remotemibs, remaining_oids, host, varbind_table ): """Process OIDs that could not be translated and add them to other oids.""" for oid in not_translated_oids: @@ -210,12 +218,12 @@ def _process_remaining_oids( _resolve_remaining_oids(self, remaining_oids, varbind_table) -def _resolve_remaining_oids(self, remaining_oids, varbind_table): +def _resolve_remaining_oids(self: Poller, remaining_oids, varbind_table): """Resolve remaining OIDs.""" for w in remaining_oids: try: varbind_table.append( - ObjectType(ObjectIdentity(w[0]), w[1]).resolveWithMib( + ObjectType(ObjectIdentity(w[0]), w[1]).resolve_with_mib( self.mib_view_controller ) ) diff --git a/splunk_connect_for_snmp/snmp/varbinds_resolver.py b/splunk_connect_for_snmp/snmp/varbinds_resolver.py index 7f58a7269..2288394d7 100644 --- a/splunk_connect_for_snmp/snmp/varbinds_resolver.py +++ b/splunk_connect_for_snmp/snmp/varbinds_resolver.py @@ -247,9 +247,9 @@ def process_profiles(self): current_profile.process() self.list_of_profiles[profile_name] = current_profile - def get_polling_info_from_profiles(self, profiles_names, walk=False) -> Profile: + def get_polling_info_from_profiles(self, profiles_names, is_walk=False) -> Profile: profiles = [self.get_profile(name) for name in profiles_names] - if len(profiles) == 1 or walk: + if len(profiles) == 1 or is_walk: return profiles[0] return reduce(self.combine_profiles, profiles) diff --git a/splunk_connect_for_snmp/traps.py b/splunk_connect_for_snmp/traps.py index 44767735c..3df75b0b9 100644 --- a/splunk_connect_for_snmp/traps.py +++ b/splunk_connect_for_snmp/traps.py @@ -137,13 +137,18 @@ def decode_security_context(hexstr: bytes) -> str | None: # Callback function for receiving notifications # noinspection PyUnusedLocal def cb_fun( - snmp_engine, state_reference, context_engine_id, context_name, varbinds, cb_ctx + snmp_engine: engine.SnmpEngine, + state_reference, + context_engine_id, + context_name, + varbinds, + cb_ctx, ): logger.debug( 'Notification from ContextEngineId "%s", ContextName "%s"' % (context_engine_id.prettyPrint(), context_name.prettyPrint()) ) - exec_context = 
snmp_engine.observer.getExecutionContext(
+    exec_context = snmp_engine.observer.get_execution_context(
         "rfc3412.receiveMessage:request"
     )
 
@@ -187,18 +192,18 @@ def authentication_observer_cb_fun(snmp_engine, execpoint, variables, contexts):
     )
 
 
-def add_communities(config_base, snmp_engine):
+def add_communities(config_base: dict, snmp_engine: engine.SnmpEngine):
     idx = 0
     if "communities" in config_base:
         if "2c" in config_base["communities"]:
             for community in config_base["communities"]["2c"]:
                 idx += 1
-                config.addV1System(snmp_engine, idx, community)
+                config.add_v1_system(snmp_engine, str(idx), community)
         if "1" in config_base["communities"] or 1 in config_base["communities"]:
             v = config_base["communities"].get("1", config_base["communities"].get(1))
             for community in v:
                 idx += 1
-                config.addV1System(snmp_engine, idx, community)
+                config.add_v1_system(snmp_engine, str(idx), community)
 
 
 def main():
@@ -212,7 +217,7 @@ def main():
 
     # Register a callback function to log errors with traps authentication
     observer_context: Dict[Any, Any] = {}
-    snmp_engine.observer.registerObserver(
+    snmp_engine.observer.register_observer(
         authentication_observer_cb_fun,
         "rfc2576.prepareDataElements:sm-failure",
         "rfc3412.prepareDataElements:sm-failure",
@@ -221,17 +226,17 @@ def main():
 
     # UDP socket over IPv6 listens also for IPv4
     if IPv6_ENABLED:
-        config.addTransport(
+        config.add_transport(
             snmp_engine,
-            udp6.domainName,
-            udp6.Udp6Transport().openServerMode(("::", 2162)),
+            udp6.DOMAIN_NAME,
+            udp6.Udp6Transport().open_server_mode(("::", 2162)),
         )
     else:
         # UDP over IPv4, first listening interface/port
-        config.addTransport(
+        config.add_transport(
             snmp_engine,
-            udp.domainName,
-            udp.UdpTransport().openServerMode(("0.0.0.0", 2162)),
+            udp.DOMAIN_NAME,
+            udp.UdpTransport().open_server_mode(("0.0.0.0", 2162)),
         )
 
     with open(CONFIG_PATH, encoding="utf-8") as file:
@@ -260,13 +265,13 @@ def main():
             priv_protocol = PrivProtocolMap.get(priv_protocol.upper(), "NONE")
 
             for security_engine_id in SECURITY_ENGINE_ID_LIST:
-                config.addV3User(
+                config.add_v3_user(
                     snmp_engine,
                     userName=username,
                     authProtocol=auth_protocol,
-                    authKey=auth_key,
+                    authKey=auth_key if auth_key else None,
                     privProtocol=priv_protocol,
-                    privKey=priv_key,
+                    privKey=priv_key if priv_key else None,
                     securityEngineId=v2c.OctetString(hexValue=security_engine_id),
                 )
                 logger.debug(
diff --git a/splunk_connect_for_snmp/walk.py b/splunk_connect_for_snmp/walk.py
index 386dab603..67b40fb86 100644
--- a/splunk_connect_for_snmp/walk.py
+++ b/splunk_connect_for_snmp/walk.py
@@ -15,6 +15,7 @@
 #
 import logging
 import sys
+from asyncio import run
 from csv import DictReader
 
 from splunk_connect_for_snmp.common.inventory_record import InventoryRecord
@@ -30,7 +31,7 @@
 logger.addHandler(handler)
 
 
-def run_walk():
+async def run_walk():
     poller = Poller(no_mongo=True)
 
     with open("inventory.csv", encoding="utf-8") as csv_file:
@@ -44,11 +45,11 @@
                 ir = InventoryRecord(**source_record)
                 retry = True
                 while retry:
-                    retry, result = poller.do_work(ir, walk=True)
+                    retry, result = await poller.do_work(ir, is_walk=True)
                 logger.debug(result)
             except Exception as e:
                 logger.exception(e)
 
 
 if __name__ == "__main__":
-    run_walk()
+    run(run_walk())
diff --git a/test/multi_bulk_walk_cmd/__init__.py b/test/multi_bulk_walk_cmd/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/test/multi_bulk_walk_cmd/agent_context.py b/test/multi_bulk_walk_cmd/agent_context.py
new file mode 100644
index 000000000..6e365d332
--- /dev/null
+++ 
b/test/multi_bulk_walk_cmd/agent_context.py @@ -0,0 +1,80 @@ +import asyncio + +from pysnmp.carrier.asyncio.dgram import udp, udp6 +from pysnmp.entity import config, engine +from pysnmp.entity.rfc3413 import cmdrsp, context +from pysnmp.hlapi.v3arch.asyncio import SnmpEngine + +# Set the port to 1611 instead of 161, because 161 is a +# privileged port and requires root access +AGENT_PORT = 1611 + + +async def start_agent( + enable_ipv6: bool = False, +) -> SnmpEngine: + snmpEngine = engine.SnmpEngine() + + config.add_transport( + snmpEngine, + udp.DOMAIN_NAME, + udp.UdpTransport().open_server_mode(("localhost", AGENT_PORT)), + ) + + if enable_ipv6: + config.add_transport( + snmpEngine, + udp6.DOMAIN_NAME, + udp6.Udp6Transport().open_server_mode(("localhost", AGENT_PORT)), + ) + + config.add_v1_system(snmpEngine, "public", "public") + + # Allow read MIB access for this user / securityModels at VACM + config.add_vacm_user(snmpEngine, 2, "public", "noAuthNoPriv", (1, 3, 6), (1, 3, 6)) + + # Configure SNMP context + snmpContext = context.SnmpContext(snmpEngine) + cmdrsp.GetCommandResponder(snmpEngine, snmpContext) + cmdrsp.NextCommandResponder(snmpEngine, snmpContext) + cmdrsp.BulkCommandResponder(snmpEngine, snmpContext) + cmdrsp.SetCommandResponder(snmpEngine, snmpContext) + + snmpEngine.transport_dispatcher.job_started(1) + + snmpEngine.open_dispatcher() + + await asyncio.sleep(1) + + return snmpEngine + + +class AgentContextManager: + """ + A context manager for managing the lifecycle of an SNMP test agent. + + Usage: + async with AgentContextManager() as agent: + # Perform operations with the agent + + When the context is entered, the agent is started using the `start_agent()` function. + When the context is exited, the agent's transport dispatcher is stopped and closed. + """ + + def __init__( + self, + enable_ipv6: bool = False, + enable_custom_objects: bool = False, + enable_table_creation: bool = False, + ): + self.enable_ipv6 = enable_ipv6 + self.enable_custom_objects = enable_custom_objects + self.enable_table_creation = enable_table_creation + + async def __aenter__(self): + self.agent = await start_agent(self.enable_ipv6) + return self.agent + + async def __aexit__(self, exc_type, exc_val, exc_tb): + self.agent.transport_dispatcher.job_finished(1) + self.agent.close_dispatcher() diff --git a/test/multi_bulk_walk_cmd/test_v2c_multibulkwalk.py b/test/multi_bulk_walk_cmd/test_v2c_multibulkwalk.py new file mode 100644 index 000000000..fcff9bf65 --- /dev/null +++ b/test/multi_bulk_walk_cmd/test_v2c_multibulkwalk.py @@ -0,0 +1,295 @@ +import pytest +from pysnmp.hlapi.v3arch.asyncio import ( + CommunityData, + ContextData, + ObjectIdentity, + ObjectType, + SnmpEngine, + UdpTransportTarget, +) +from pysnmp.proto.errind import RequestTimedOut + +from splunk_connect_for_snmp.snmp.multi_bulk_walk_cmd import multi_bulk_walk_cmd + +from .agent_context import AGENT_PORT, AgentContextManager + + +@pytest.mark.asyncio +async def test_v2c_multi_bulk_walk_multiple_subtrees(): + """ + Test that multi_bulk_walk_cmd can walk multiple OID trees in parallel + and complete independently. 
+ """ + async with AgentContextManager(): + snmpEngine = SnmpEngine() + # We walk 'system' and 'snmp' subtrees in parallel + max_repetitions = 2 + objects = multi_bulk_walk_cmd( + snmpEngine, + CommunityData("public"), + await UdpTransportTarget.create(("localhost", AGENT_PORT)), + ContextData(), + 0, + max_repetitions, + ObjectType(ObjectIdentity("SNMPv2-MIB", "system")), + ObjectType(ObjectIdentity("SNMPv2-MIB", "snmp")), + maxRows=20, + lexicographicMode=False, + ) + + count = 0 + seen_system = False + seen_snmp = False + async for errorIndication, errorStatus, errorIndex, varBinds in objects: + assert errorIndication is None + assert errorStatus == 0 + assert isinstance(varBinds, tuple) + assert len(varBinds) > 0 + for vb in varBinds: + name = vb[0].prettyPrint() + if name.startswith("SNMPv2-MIB::sys"): + seen_system = True + if name.startswith("SNMPv2-MIB::snmp"): + seen_snmp = True + count += 1 + + assert seen_system and seen_snmp + assert count > 0 + + +@pytest.mark.asyncio +async def test_v2c_multi_bulk_walk_lookupmib_false(): + async with AgentContextManager(): + snmpEngine = SnmpEngine() + count = 0 + max_repetitions = 2 + objects = multi_bulk_walk_cmd( + snmpEngine, + CommunityData("public"), + await UdpTransportTarget.create(("localhost", AGENT_PORT)), + ContextData(), + 0, + max_repetitions, + ObjectType(ObjectIdentity("SNMPv2-MIB", "system")), + ObjectType(ObjectIdentity("SNMPv2-MIB", "snmp")), + lookupMib=False, + lexicographicMode=False, + ) + + async for errorIndication, errorStatus, errorIndex, varBinds in objects: + assert errorIndication is None + assert errorStatus == 0 + assert isinstance(varBinds, tuple) + count += 1 + if count >= 5: + break + + assert count >= 3 + + +@pytest.mark.asyncio +async def test_v2c_multi_bulk_walk_lookupmib_true(): + async with AgentContextManager(): + snmpEngine = SnmpEngine() + count = 0 + max_repetitions = 1 + objects = multi_bulk_walk_cmd( + snmpEngine, + CommunityData("public"), + await UdpTransportTarget.create(("localhost", AGENT_PORT)), + ContextData(), + 0, + max_repetitions, + ObjectType(ObjectIdentity("SNMPv2-MIB", "system")), + ObjectType(ObjectIdentity("SNMPv2-MIB", "snmp")), + lookupMib=True, + ) + + async for errorIndication, errorStatus, errorIndex, varBinds in objects: + assert errorIndication is None + assert errorStatus == 0 + count += 1 + if count > 5: + break + + assert count > 0 + + +@pytest.mark.asyncio +async def test_v2c_multi_bulk_walk_maxrows_limit(): + max_rows = 6 + async with AgentContextManager(): + snmpEngine = SnmpEngine() + total_seen = 0 + max_repetitions = 2 + objects = multi_bulk_walk_cmd( + snmpEngine, + CommunityData("public"), + await UdpTransportTarget.create(("localhost", AGENT_PORT)), + ContextData(), + 0, + max_repetitions, + ObjectType(ObjectIdentity("SNMPv2-MIB", "system")), + ObjectType(ObjectIdentity("SNMPv2-MIB", "snmp")), + lexicographicMode=False, + maxRows=max_rows, + ) + + async for errorIndication, errorStatus, errorIndex, varBinds in objects: + assert errorIndication is None + assert errorStatus == 0 + total_seen += len(varBinds) + + assert total_seen == max_rows + + +@pytest.mark.asyncio +async def test_v2c_multi_bulk_walk_maxcalls_limit(): + async with AgentContextManager(): + snmpEngine = SnmpEngine() + max_calls = 2 + call_count = 0 + max_repetitions = 4 + + objects = multi_bulk_walk_cmd( + snmpEngine, + CommunityData("public"), + await UdpTransportTarget.create(("localhost", AGENT_PORT)), + ContextData(), + 0, + max_repetitions, + ObjectType(ObjectIdentity("SNMPv2-MIB", "system")), 
+ ObjectType(ObjectIdentity("SNMPv2-MIB", "snmp")), + maxCalls=max_calls, + lexicographicMode=False, + ) + + async for errorIndication, errorStatus, errorIndex, varBinds in objects: + call_count += 1 + + assert call_count <= max_calls + + +@pytest.mark.asyncio +async def test_v2c_multi_bulk_walk_maxcalls_with_maxrows_varbinds_count_limit(): + async with AgentContextManager(): + snmpEngine = SnmpEngine() + max_calls = 2 + max_rows = 10 + call_count = 0 + total_count = 0 + max_repetitions = 10 + + objects = multi_bulk_walk_cmd( + snmpEngine, + CommunityData("public"), + await UdpTransportTarget.create(("localhost", AGENT_PORT)), + ContextData(), + 0, + max_repetitions, + ObjectType(ObjectIdentity("SNMPv2-MIB", "system")), + ObjectType(ObjectIdentity("SNMPv2-MIB", "snmp")), + maxCalls=max_calls, + maxRows=max_rows, + ) + + async for errorIndication, errorStatus, errorIndex, varBinds in objects: + total_count += len(varBinds) + call_count += 1 + + assert total_count == max_rows + assert call_count <= max_calls + + +@pytest.mark.asyncio +async def test_v2c_multi_bulk_walk_ignore_non_increasing_oid(): + async with AgentContextManager(): + snmpEngine = SnmpEngine() + max_repetitions = 2 + + objects = multi_bulk_walk_cmd( + snmpEngine, + CommunityData("public"), + await UdpTransportTarget.create(("localhost", AGENT_PORT)), + ContextData(), + 0, + max_repetitions, + ObjectType(ObjectIdentity("SNMPv2-MIB", "system")), + ObjectType(ObjectIdentity("SNMPv2-MIB", "snmp")), + ignoreNonIncreasingOid=True, + ) + + count = 0 + async for errorIndication, errorStatus, errorIndex, varBinds in objects: + assert errorStatus == 0 + count += 1 + if count > 3: + break + + assert count > 0 + + +@pytest.mark.asyncio +async def test_v2c_multi_bulk_walk_0_4_subtree(): + async with AgentContextManager(): + snmpEngine = SnmpEngine() + index = 0 + max_repetitions = 4 + async for ( + errorIndication, + errorStatus, + errorIndex, + varBinds, + ) in multi_bulk_walk_cmd( + snmpEngine, + CommunityData("public"), + await UdpTransportTarget.create(("localhost", AGENT_PORT)), + ContextData(), + 0, + max_repetitions, + ObjectType(ObjectIdentity("SNMPv2-MIB", "snmp")), + lexicographicMode=False, + ): + assert errorIndication is None + assert errorStatus == 0 + assert len(varBinds) == 4 + if index == 0: + assert varBinds[0][0].prettyPrint() == "SNMPv2-MIB::snmpInPkts.0" + + if index == 1: + assert ( + varBinds[0][0].prettyPrint() + == "SNMPv2-MIB::snmpInBadCommunityUses.0" + ) + + if index == 26: + assert varBinds[0][0].prettyPrint() == "SNMPv2-MIB::snmpSilentDrops.0" + + if index == 27: + assert varBinds[0][0].prettyPrint() == "SNMPv2-MIB::snmpProxyDrops.0" + + index += 1 + + assert index > 0 + + +@pytest.mark.asyncio +async def test_v2c_multi_bulk_walk_non_exist(): + snmpEngine = SnmpEngine() + objects = multi_bulk_walk_cmd( + snmpEngine, + CommunityData("public"), + await UdpTransportTarget.create( + ("localhost", AGENT_PORT), + timeout=0.5, + retries=0, + ), + ContextData(), + 0, + 1, + ObjectType(ObjectIdentity("SNMPv2-MIB", "sysDescr", 0)), + ) + + async for errorIndication, errorStatus, errorIndex, varBinds in objects: + assert isinstance(errorIndication, RequestTimedOut) + break diff --git a/test/snmp/test_auth.py b/test/snmp/test_auth.py index 9281227ef..549a4eaea 100644 --- a/test/snmp/test_auth.py +++ b/test/snmp/test_auth.py @@ -1,11 +1,11 @@ -from unittest import TestCase -from unittest.mock import MagicMock, Mock, mock_open, patch +from unittest import IsolatedAsyncioTestCase +from unittest.mock import AsyncMock, Mock, 
mock_open, patch from pysnmp.entity.config import ( - usmAesBlumenthalCfb192Protocol, - usmHMAC128SHA224AuthProtocol, - usmNoAuthProtocol, - usmNoPrivProtocol, + USM_AUTH_HMAC128_SHA224, + USM_AUTH_NONE, + USM_PRIV_CFB192_AES_BLUMENTHAL, + USM_PRIV_NONE, ) from pysnmp.proto.rfc1902 import OctetString @@ -41,7 +41,7 @@ ) -class TestAuth(TestCase): +class TestAuth(IsolatedAsyncioTestCase): @patch("builtins.open", new_callable=mock_open, read_data=mock_value) @patch("os.path.exists") def test_get_secret_value_exists(self, m_exists, m_open): @@ -66,9 +66,9 @@ def test_get_secret_value_default(self, m_exists): value = get_secret_value("/location", "key", default="default value") self.assertEqual("default value", value) - @patch("splunk_connect_for_snmp.snmp.auth.getCmd") + @patch("splunk_connect_for_snmp.snmp.auth.get_cmd", new_callable=AsyncMock) @patch("splunk_connect_for_snmp.snmp.auth.fetch_security_engine_id") - def test_get_security_engine_id_not_present(self, m_fetch, m_get_cmd): + async def test_get_security_engine_id_not_present(self, m_fetch, m_get_cmd): ir2 = InventoryRecord( **{ "address": "192.168.0.1", @@ -87,24 +87,20 @@ def test_get_security_engine_id_not_present(self, m_fetch, m_get_cmd): snmpEngine = Mock() logger = Mock() - m_get_cmd.return_value = iter( - [(None, 0, 0, "Oid1"), (None, 0, 0, "Oid2"), (None, 0, 0, "Oid3")] - ) + m_get_cmd.return_value = (None, 0, 0, ["Oid1", "Oid2", "Oid3"]) m_fetch.side_effect = Exception("boom") with self.assertRaises(Exception) as e: - get_security_engine_id(logger, ir2, snmpEngine) + await get_security_engine_id(logger, ir2, snmpEngine) self.assertEqual("boom", e.exception.args[0]) - - calls = snmpEngine.observer.registerObserver.call_args_list + calls = snmpEngine.observer.register_observer.call_args_list self.assertEqual("rfc3412.prepareDataElements:internal", calls[0].args[1]) - m_get_cmd.assert_called() - @patch("splunk_connect_for_snmp.snmp.auth.getCmd") + @patch("splunk_connect_for_snmp.snmp.auth.get_cmd", new_callable=AsyncMock) @patch("splunk_connect_for_snmp.snmp.auth.fetch_security_engine_id") - def test_get_security_engine_id(self, m_fetch, m_get_cmd): + async def test_get_security_engine_id(self, m_fetch, m_get_cmd): ir2 = InventoryRecord( **{ "address": "192.168.0.1", @@ -121,17 +117,16 @@ def test_get_security_engine_id(self, m_fetch, m_get_cmd): ) snmpEngine = Mock() + snmpEngine.observer = Mock() + snmpEngine.observer.register_observer = Mock() + logger = Mock() m_fetch.return_value = "My test value" + m_get_cmd.return_value = (None, 0, 0, ["Oid1", "Oid2", "Oid3"]) - m_get_cmd.return_value = iter( - [(None, 0, 0, "Oid1"), (None, 0, 0, "Oid2"), (None, 0, 0, "Oid3")] - ) - - result = get_security_engine_id(logger, ir2, snmpEngine) - - calls = snmpEngine.observer.registerObserver.call_args_list + result = await get_security_engine_id(logger, ir2, snmpEngine) + calls = snmpEngine.observer.register_observer.call_args_list self.assertEqual("rfc3412.prepareDataElements:internal", calls[0].args[1]) m_get_cmd.assert_called() @@ -153,7 +148,7 @@ def test_fetch_security_engine_id_missing(self): @patch("os.path.exists") @patch("splunk_connect_for_snmp.snmp.auth.get_secret_value") - def test_get_auth_v3(self, m_get_secret_value, m_exists): + async def test_get_auth_v3(self, m_get_secret_value, m_exists): m_exists.return_value = True m_get_secret_value.side_effect = [ "secret1", @@ -167,14 +162,14 @@ def test_get_auth_v3(self, m_get_secret_value, m_exists): logger = Mock() snmpEngine = Mock() - result = get_auth_v3(logger, ir, 
snmpEngine) + result = await get_auth_v3(logger, ir, snmpEngine) security_engine_result = OctetString(hexValue="80003a8c04") self.assertEqual("secret1", result.userName) - self.assertEqual("secret2", result.authKey) - self.assertEqual("secret3", result.privKey) - self.assertEqual("authPriv", result.securityLevel) - self.assertEqual(usmHMAC128SHA224AuthProtocol, result.authProtocol) - self.assertEqual(usmAesBlumenthalCfb192Protocol, result.privProtocol) + self.assertEqual("secret2", result.authentication_key) + self.assertEqual("secret3", result.privacy_key) + self.assertEqual("authPriv", result.security_level) + self.assertEqual(USM_AUTH_HMAC128_SHA224, result.authentication_protocol) + self.assertEqual(USM_PRIV_CFB192_AES_BLUMENTHAL, result.privacy_protocol) self.assertEqual(security_engine_result._value, result.securityEngineId._value) self.assertEqual("secret1", result.securityName) self.assertEqual(1, result.authKeyType) @@ -183,7 +178,7 @@ def test_get_auth_v3(self, m_get_secret_value, m_exists): @patch("os.path.exists") @patch("splunk_connect_for_snmp.snmp.auth.get_secret_value") @patch("splunk_connect_for_snmp.snmp.auth.get_security_engine_id") - def test_get_auth_v3_security_engine_not_str( + async def test_get_auth_v3_security_engine_not_str( self, m_get_security_engine_id, m_get_secret_value, m_exists ): m_exists.return_value = True @@ -215,16 +210,16 @@ def test_get_auth_v3_security_engine_not_str( } ) - result = get_auth_v3(logger, ir2, snmpEngine) + result = await get_auth_v3(logger, ir2, snmpEngine) m_get_security_engine_id.assert_called() self.assertEqual("secret1", result.userName) - self.assertEqual("secret2", result.authKey) - self.assertEqual("secret3", result.privKey) - self.assertEqual("authPriv", result.securityLevel) - self.assertEqual(usmHMAC128SHA224AuthProtocol, result.authProtocol) - self.assertEqual(usmAesBlumenthalCfb192Protocol, result.privProtocol) + self.assertEqual("secret2", result.authentication_key) + self.assertEqual("secret3", result.privacy_key) + self.assertEqual("authPriv", result.security_level) + self.assertEqual(USM_AUTH_HMAC128_SHA224, result.authentication_protocol) + self.assertEqual(USM_PRIV_CFB192_AES_BLUMENTHAL, result.privacy_protocol) self.assertEqual("ENGINE123", result.securityEngineId) self.assertEqual("secret1", result.securityName) self.assertEqual(1, result.authKeyType) @@ -232,7 +227,7 @@ def test_get_auth_v3_security_engine_not_str( @patch("os.path.exists") @patch("splunk_connect_for_snmp.snmp.auth.get_secret_value") - def test_get_auth_v3_exception(self, m_get_secret_value, m_exists): + async def test_get_auth_v3_exception(self, m_get_secret_value, m_exists): m_exists.return_value = False m_get_secret_value.side_effect = [ "secret1", @@ -248,12 +243,12 @@ def test_get_auth_v3_exception(self, m_get_secret_value, m_exists): snmpEngine = Mock() with self.assertRaises(Exception) as e: - get_auth_v3(logger, ir, snmpEngine) + await get_auth_v3(logger, ir, snmpEngine) self.assertEqual("invalid username from secret secret_ir", e.exception.args[0]) @patch("os.path.exists") @patch("splunk_connect_for_snmp.snmp.auth.get_secret_value") - def test_get_auth_v3_noauthnopriv(self, m_get_secret_value, m_exists): + async def test_get_auth_v3_noauthnopriv(self, m_get_secret_value, m_exists): m_exists.return_value = True m_get_secret_value.side_effect = [ "secret1", @@ -267,14 +262,14 @@ def test_get_auth_v3_noauthnopriv(self, m_get_secret_value, m_exists): logger = Mock() snmpEngine = Mock() - result = get_auth_v3(logger, ir, snmpEngine) + result = 
await get_auth_v3(logger, ir, snmpEngine) security_engine_result = OctetString(hexValue="80003a8c04") self.assertEqual("secret1", result.userName) - self.assertIsNone(result.authKey) - self.assertIsNone(result.privKey) - self.assertEqual("noAuthNoPriv", result.securityLevel) - self.assertEqual(usmNoAuthProtocol, result.authProtocol) - self.assertEqual(usmNoPrivProtocol, result.privProtocol) + self.assertIsNone(result.authentication_key) + self.assertIsNone(result.privacy_key) + self.assertEqual("noAuthNoPriv", result.security_level) + self.assertEqual(USM_AUTH_NONE, result.authentication_protocol) + self.assertEqual(USM_PRIV_NONE, result.privacy_protocol) self.assertEqual(security_engine_result._value, result.securityEngineId._value) self.assertEqual("secret1", result.securityName) self.assertEqual(1, result.authKeyType) @@ -282,7 +277,7 @@ def test_get_auth_v3_noauthnopriv(self, m_get_secret_value, m_exists): @patch("os.path.exists") @patch("splunk_connect_for_snmp.snmp.auth.get_secret_value") - def test_get_auth_v3_authnopriv(self, m_get_secret_value, m_exists): + async def test_get_auth_v3_authnopriv(self, m_get_secret_value, m_exists): m_exists.return_value = True m_get_secret_value.side_effect = [ "secret1", @@ -296,14 +291,14 @@ def test_get_auth_v3_authnopriv(self, m_get_secret_value, m_exists): logger = Mock() snmpEngine = Mock() - result = get_auth_v3(logger, ir, snmpEngine) + result = await get_auth_v3(logger, ir, snmpEngine) security_engine_result = OctetString(hexValue="80003a8c04") self.assertEqual("secret1", result.userName) - self.assertEqual("secret2", result.authKey) - self.assertIsNone(result.privKey) - self.assertEqual("authNoPriv", result.securityLevel) - self.assertEqual(usmHMAC128SHA224AuthProtocol, result.authProtocol) - self.assertEqual(usmNoPrivProtocol, result.privProtocol) + self.assertEqual("secret2", result.authentication_key) + self.assertIsNone(result.privacy_key) + self.assertEqual("authNoPriv", result.security_level) + self.assertEqual(USM_AUTH_HMAC128_SHA224, result.authentication_protocol) + self.assertEqual(USM_PRIV_NONE, result.privacy_protocol) self.assertEqual(security_engine_result._value, result.securityEngineId._value) self.assertEqual("secret1", result.securityName) self.assertEqual(1, result.authKeyType) @@ -312,53 +307,62 @@ def test_get_auth_v3_authnopriv(self, m_get_secret_value, m_exists): def test_get_auth_v2c(self): result = get_auth_v2c(ir) self.assertEqual("public", result.communityName) - self.assertEqual(1, result.mpModel) + self.assertEqual(1, result.message_processing_model) def test_get_auth_v1(self): result = get_auth_v1(ir) self.assertEqual("public", result.communityName) - self.assertEqual(0, result.mpModel) + self.assertEqual(0, result.message_processing_model) @patch("splunk_connect_for_snmp.snmp.auth.get_auth_v1") - def test_get_auth_1(self, m_get_auth): + async def test_get_auth_1(self, m_get_auth): ir.version = "1" - get_auth(Mock(), ir, Mock()) + await get_auth(Mock(), ir, Mock()) m_get_auth.assert_called() @patch("splunk_connect_for_snmp.snmp.auth.get_auth_v2c") - def test_get_auth_2c(self, m_get_auth): + async def test_get_auth_2c(self, m_get_auth): ir.version = "2c" - get_auth(Mock(), ir, Mock()) + await get_auth(Mock(), ir, Mock()) m_get_auth.assert_called() @patch("splunk_connect_for_snmp.snmp.auth.get_auth_v3") - def test_get_auth_3(self, m_get_auth): + async def test_get_auth_3(self, m_get_auth): ir.version = "3" - get_auth(Mock(), ir, Mock()) + await get_auth(Mock(), ir, Mock()) m_get_auth.assert_called() - 
@patch("splunk_connect_for_snmp.snmp.auth.Udp6TransportTarget") - @patch("splunk_connect_for_snmp.snmp.auth.UdpTransportTarget") - def test_setup_transport_target_ipv4( - self, m_setup_udp_transport_target, m_setup_udp6_transport_target - ): + @patch( + "splunk_connect_for_snmp.snmp.auth.UdpTransportTarget.create", + new_callable=AsyncMock, + ) + @patch( + "splunk_connect_for_snmp.snmp.auth.Udp6TransportTarget.create", + new_callable=AsyncMock, + ) + async def test_setup_transport_target_ipv4(self, m_udp6_create, m_udp_create): ir.address = "127.0.0.1" ir.port = 161 - m_setup_udp_transport_target.return_value = "UDP4" - m_setup_udp6_transport_target.return_value = "UDP6" - transport = setup_transport_target(ir) + + m_udp_create.return_value = "UDP4" + m_udp6_create.return_value = "UDP6" + + transport = await setup_transport_target(ir) self.assertEqual("UDP4", transport) - @patch("splunk_connect_for_snmp.snmp.auth.IPv6_ENABLED") - @patch("splunk_connect_for_snmp.snmp.auth.Udp6TransportTarget") - @patch("splunk_connect_for_snmp.snmp.auth.UdpTransportTarget") - def test_setup_transport_target_ipv6( - self, m_setup_udp_transport_target, m_setup_udp6_transport_target, ipv6_enabled - ): - ipv6_enabled.return_value = True + @patch("splunk_connect_for_snmp.snmp.auth.IPv6_ENABLED", True) + @patch( + "splunk_connect_for_snmp.snmp.auth.UdpTransportTarget.create", + new_callable=AsyncMock, + ) + @patch( + "splunk_connect_for_snmp.snmp.auth.Udp6TransportTarget.create", + new_callable=AsyncMock, + ) + async def test_setup_transport_target_ipv6(self, m_udp6_create, m_udp_create): ir.address = "2001:0db8:ac10:fe01::0001" ir.port = 161 - m_setup_udp_transport_target.return_value = "UDP4" - m_setup_udp6_transport_target.return_value = "UDP6" - transport = setup_transport_target(ir) + m_udp_create.return_value = "UDP4" + m_udp6_create.return_value = "UDP6" + transport = await setup_transport_target(ir) self.assertEqual("UDP6", transport) diff --git a/test/snmp/test_do_work.py b/test/snmp/test_do_work.py index 92c9132d4..8a6e91b69 100644 --- a/test/snmp/test_do_work.py +++ b/test/snmp/test_do_work.py @@ -1,5 +1,5 @@ -from unittest import TestCase -from unittest.mock import MagicMock, patch +from unittest import IsolatedAsyncioTestCase +from unittest.mock import AsyncMock, MagicMock, patch from splunk_connect_for_snmp.common.inventory_record import InventoryRecord from splunk_connect_for_snmp.snmp.exceptions import SnmpActionError @@ -22,54 +22,62 @@ ) -class TestDoWork(TestCase): +class TestDoWork(IsolatedAsyncioTestCase): @patch("pymongo.MongoClient", MagicMock()) @patch("mongolock.MongoLock.__init__", MagicMock()) @patch("mongolock.MongoLock.lock", MagicMock()) @patch("mongolock.MongoLock.release", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.auth.get_auth", None) + @patch("splunk_connect_for_snmp.snmp.auth.get_auth", new_callable=AsyncMock) @patch("splunk_connect_for_snmp.snmp.manager.get_context_data", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.manager.setup_transport_target", MagicMock()) - def test_do_work_no_work_to_do(self): + @patch( + "splunk_connect_for_snmp.snmp.manager.setup_transport_target", + new_callable=AsyncMock, + ) + async def test_do_work_no_work_to_do(self, setup_transport, get_auth): poller = Poller.__new__(Poller) poller.last_modified = 1609675634 poller.snmpEngine = None poller.profiles_manager = MagicMock() poller.profiles_collection = MagicMock() poller.profiles_collection.process_profiles = MagicMock() - poller.already_loaded_mibs = {} + poller.already_loaded_mibs 
= set() varbinds_bulk, varbinds_get = set(), set() get_mapping, bulk_mapping = {}, {} - poller.get_varbinds = MagicMock() - poller.get_varbinds.return_value = ( - varbinds_get, - get_mapping, - varbinds_bulk, - bulk_mapping, + poller.get_varbinds = MagicMock( + return_value=(varbinds_get, get_mapping, varbinds_bulk, bulk_mapping) ) - result = poller.do_work(inventory_record) + result = await poller.do_work(inventory_record) self.assertEqual(result, (False, {})) - @patch("pymongo.MongoClient", MagicMock()) @patch("mongolock.MongoLock.__init__", MagicMock()) @patch("mongolock.MongoLock.lock", MagicMock()) @patch("mongolock.MongoLock.release", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.auth.get_auth", None) + @patch("splunk_connect_for_snmp.snmp.auth.get_auth", new_callable=AsyncMock) @patch("splunk_connect_for_snmp.snmp.manager.get_context_data", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.manager.setup_transport_target", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.manager.bulkCmd") - @patch("splunk_connect_for_snmp.snmp.manager.getCmd") + @patch( + "splunk_connect_for_snmp.snmp.manager.setup_transport_target", + new_callable=AsyncMock, + ) + @patch("splunk_connect_for_snmp.snmp.manager.multi_bulk_walk_cmd") + @patch("splunk_connect_for_snmp.snmp.manager.get_cmd", new_callable=AsyncMock) @patch("splunk_connect_for_snmp.common.collection_manager.ProfilesManager") - def test_do_work_bulk(self, load_profiles, getCmd, bulkCmd): + async def test_do_work_bulk_varbinds( + self, + load_profiles, + get_cmd, + multi_bulk_walk_cmd, + setup_transport_target, + get_auth, + ): poller = Poller.__new__(Poller) poller.last_modified = 1609675634 poller.snmpEngine = None poller.builder = MagicMock() + poller.mib_view_controller = MagicMock() poller.profiles_manager = MagicMock() - m_process_data = MagicMock() - m_process_data.return_value = (False, [], {}) - poller.process_snmp_data = m_process_data + poller.process_snmp_data = MagicMock(return_value=(False, [], {})) + requested_profiles = ["profile1", "profile2"] poller.profiles = { "profile1": { @@ -78,32 +86,57 @@ def test_do_work_bulk(self, load_profiles, getCmd, bulkCmd): }, "profile2": {"frequency": 20, "varBinds": [["UDP-MIB", "udpOutDatagrams"]]}, } - poller.already_loaded_mibs = {} + poller.already_loaded_mibs = set() poller.profiles_collection = ProfileCollection(poller.profiles) poller.profiles_collection.process_profiles() - bulkCmd.return_value = [(None, 0, 0, "Oid1"), (None, 0, 0, "Oid2")] - poller.do_work(inventory_record, profiles=requested_profiles) + + def multi_bulk_walk_cmd_mock(*args, **kwargs): + async def _gen(): + yield (None, 0, 0, ["Oid1"]) + yield (None, 0, 0, ["Oid2"]) + + return _gen() + + multi_bulk_walk_cmd.side_effect = multi_bulk_walk_cmd_mock + get_auth.return_value = MagicMock() + setup_transport_target.return_value = MagicMock() + + await poller.do_work(inventory_record, profiles=requested_profiles) + self.assertEqual(poller.process_snmp_data.call_count, 2) - self.assertEqual(getCmd.call_count, 0) - self.assertEqual(bulkCmd.call_count, 1) + get_cmd.assert_not_called() + self.assertEqual(multi_bulk_walk_cmd.call_count, 1) - @patch("pymongo.MongoClient", MagicMock()) @patch("mongolock.MongoLock.__init__", MagicMock()) @patch("mongolock.MongoLock.lock", MagicMock()) @patch("mongolock.MongoLock.release", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.auth.get_auth", None) + @patch("splunk_connect_for_snmp.snmp.auth.get_auth", new_callable=AsyncMock) 
@patch("splunk_connect_for_snmp.snmp.manager.get_context_data", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.manager.setup_transport_target", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.manager.bulkCmd") - @patch("splunk_connect_for_snmp.snmp.manager.getCmd") + @patch( + "splunk_connect_for_snmp.snmp.manager.setup_transport_target", + new_callable=AsyncMock, + ) + @patch( + "splunk_connect_for_snmp.snmp.manager.multi_bulk_walk_cmd", + new_callable=AsyncMock, + ) + @patch("splunk_connect_for_snmp.snmp.manager.get_cmd") @patch( "splunk_connect_for_snmp.common.collection_manager.ProfilesManager.return_collection" ) - def test_do_work_get(self, load_profiles, getCmd, bulkCmd): + async def test_do_work_get( + self, + load_profiles, + get_cmd, + multi_bulk_walk_cmd, + setup_transport_target, + get_auth, + ): poller = Poller.__new__(Poller) poller.last_modified = 1609675634 poller.snmpEngine = None poller.builder = MagicMock() + poller.mib_view_controller = MagicMock() poller.process_snmp_data = MagicMock() poller.profiles_manager = MagicMock() requested_profiles = ["profile1", "profile2"] @@ -117,48 +150,66 @@ def test_do_work_get(self, load_profiles, getCmd, bulkCmd): "varBinds": [["UDP-MIB", "udpOutDatagrams", 1]], }, } - poller.already_loaded_mibs = {} + poller.already_loaded_mibs = set() poller.profiles_collection = ProfileCollection(poller.profiles) poller.profiles_collection.process_profiles() - getCmd.return_value = [ - (None, 0, 0, "Oid1"), - (None, 0, 0, "Oid2"), - (None, 0, 0, "Oid3"), + get_cmd.side_effect = [ + (None, 0, 0, ["Oid1"]), + (None, 0, 0, ["Oid2"]), + (None, 0, 0, ["Oid3"]), ] - poller.do_work(inventory_record, profiles=requested_profiles) - self.assertEqual(poller.process_snmp_data.call_count, 3) - self.assertEqual(getCmd.call_count, 1) - self.assertEqual(bulkCmd.call_count, 0) + await poller.do_work(inventory_record, profiles=requested_profiles) + self.assertEqual(poller.process_snmp_data.call_count, 1) + self.assertEqual(get_cmd.call_count, 1) + self.assertEqual(multi_bulk_walk_cmd.call_count, 0) @patch("pymongo.MongoClient", MagicMock()) @patch("mongolock.MongoLock.__init__", MagicMock()) @patch("mongolock.MongoLock.lock", MagicMock()) @patch("mongolock.MongoLock.release", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.auth.get_auth", None) + @patch("splunk_connect_for_snmp.snmp.auth.get_auth", new_callable=AsyncMock) @patch("splunk_connect_for_snmp.snmp.manager.get_context_data", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.manager.setup_transport_target", MagicMock()) - @patch("splunk_connect_for_snmp.snmp.manager.bulkCmd") - @patch("splunk_connect_for_snmp.snmp.manager.getCmd") + @patch( + "splunk_connect_for_snmp.snmp.manager.setup_transport_target", + new_callable=AsyncMock, + ) + @patch( + "splunk_connect_for_snmp.snmp.manager.multi_bulk_walk_cmd", + new_callable=AsyncMock, + ) + @patch("splunk_connect_for_snmp.snmp.manager.get_cmd", new_callable=AsyncMock) @patch( "splunk_connect_for_snmp.common.collection_manager.ProfilesManager.return_collection" ) - def test_do_work_errors(self, load_profiles, getCmd, bulkCmd): + async def test_do_work_errors( + self, + load_profiles, + get_cmd_mock, + multi_bulk_walk_cmd_mock, + setup_transport_target, + get_auth, + ): poller = Poller.__new__(Poller) poller.last_modified = 1609675634 poller.snmpEngine = None poller.builder = MagicMock() + poller.mib_view_controller = MagicMock() poller.process_snmp_data = MagicMock() poller.profiles_manager = MagicMock() requested_profiles = ["profile1"] 
poller.profiles = { "profile1": {"frequency": 20, "varBinds": [["IF-MIB", "ifDescr", 1]]} } - poller.already_loaded_mibs = {} + poller.already_loaded_mibs = set() poller.profiles_collection = ProfileCollection(poller.profiles) poller.profiles_collection.process_profiles() - getCmd.return_value = [(True, True, 2, [])] + + async def get_cmd_failure(*args, **kwargs): + return (True, True, 2, []) + + get_cmd_mock.side_effect = get_cmd_failure with self.assertRaises(SnmpActionError): - poller.do_work(inventory_record, profiles=requested_profiles) + await poller.do_work(inventory_record, profiles=requested_profiles) self.assertEqual(poller.process_snmp_data.call_count, 0) - self.assertEqual(getCmd.call_count, 1) - self.assertEqual(bulkCmd.call_count, 0) + self.assertEqual(get_cmd_mock.call_count, 1) + self.assertEqual(multi_bulk_walk_cmd_mock.call_count, 0) diff --git a/test/snmp/test_get_varbinds.py b/test/snmp/test_get_varbinds.py index 000b808cf..a342e3ada 100644 --- a/test/snmp/test_get_varbinds.py +++ b/test/snmp/test_get_varbinds.py @@ -12,7 +12,7 @@ def test_get_varbinds_for_walk(self): poller.profiles_collection.process_profiles() poller.already_loaded_mibs = set() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_varbinds( - "192.168.0.1", walk=True + "192.168.0.1", is_walk=True ) self.assertEqual(0, len(varbinds_get)) @@ -44,10 +44,10 @@ def test_get_varbinds_for_walk_redundant(self): poller.profiles = profiles poller.profiles_collection = ProfileCollection(profiles) poller.profiles_collection.process_profiles() - poller.already_loaded_mibs = {} + poller.already_loaded_mibs = set() poller.load_mibs = Mock() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_varbinds( - "192.168.0.1", walk=True, profiles=["test1"] + "192.168.0.1", is_walk=True, profiles=["test1"] ) self.assertEqual(1, len(varbinds_get)) self.assertEqual(3, len(varbinds_bulk)) @@ -78,10 +78,10 @@ def test_get_varbinds_for_walk_none(self): poller.profiles = profiles poller.profiles_collection = ProfileCollection(profiles) poller.profiles_collection.process_profiles() - poller.already_loaded_mibs = {} + poller.already_loaded_mibs = set() poller.load_mibs = Mock() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_varbinds( - "192.168.0.1", walk=True, profiles=["test1"] + "192.168.0.1", is_walk=True, profiles=["test1"] ) self.assertEqual(0, len(varbinds_get)) self.assertEqual(1, len(varbinds_bulk)) @@ -110,10 +110,10 @@ def test_get_varbinds_for_walk_with_three_profiles(self): poller.profiles = profiles poller.profiles_collection = ProfileCollection(profiles) poller.profiles_collection.process_profiles() - poller.already_loaded_mibs = {} + poller.already_loaded_mibs = set() poller.load_mibs = Mock() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_varbinds( - "192.168.0.1", walk=True, profiles=["test1"] + "192.168.0.1", is_walk=True, profiles=["test1"] ) self.assertEqual(0, len(varbinds_get)) self.assertEqual(4, len(varbinds_bulk)) @@ -136,7 +136,7 @@ def test_get_varbinds_for_walk_next_time_no_profiles(self): poller = Poller.__new__(Poller) poller.profiles_collection = ProfileCollection({}) varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_varbinds( - "192.168.0.1", walk=True, profiles=[] + "192.168.0.1", is_walk=True, profiles=[] ) self.assertEqual(0, len(varbinds_get)) @@ -172,7 +172,7 @@ def test_get_varbinds_for_walk_with_profiles(self): poller.load_mibs = Mock() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = 
poller.get_varbinds( - "192.168.0.1", walk=True, profiles=["profile1"] + "192.168.0.1", is_walk=True, profiles=["profile1"] ) self.assertEqual(0, len(varbinds_get)) @@ -214,7 +214,7 @@ def test_get_varbinds_for_walk_with_profiles_changed_sequence(self): poller.load_mibs = Mock() varbinds_get, get_mapping, varbinds_bulk, bulk_mapping = poller.get_varbinds( - "192.168.0.1", walk=True, profiles=["profile2"] + "192.168.0.1", is_walk=True, profiles=["profile2"] ) self.assertEqual(0, len(varbinds_get)) diff --git a/test/snmp/test_mibs.py b/test/snmp/test_mibs.py index 142592c1f..d71e70e8d 100644 --- a/test/snmp/test_mibs.py +++ b/test/snmp/test_mibs.py @@ -11,7 +11,7 @@ def test_load_mib(self): poller = Poller.__new__(Poller) poller.builder = Mock() poller.load_mibs(["a", "b", "c"]) - calls = poller.builder.loadModules.call_args_list + calls = poller.builder.load_modules.call_args_list self.assertEqual("a", calls[0][0][0]) self.assertEqual("b", calls[1][0][0]) @@ -50,7 +50,7 @@ def test_is_mib_resolved(self): def test_exception_during_loading(self): poller = Poller.__new__(Poller) poller.builder = Mock() - poller.builder.loadModules.side_effect = error.MibLoadError() + poller.builder.load_modules.side_effect = error.MibLoadError() poller.load_mibs(["a"]) def test_find_new_mibs_is_found(self): diff --git a/test/snmp/test_process_snmp_data.py b/test/snmp/test_process_snmp_data.py index b3efe092b..0eed82238 100644 --- a/test/snmp/test_process_snmp_data.py +++ b/test/snmp/test_process_snmp_data.py @@ -2,6 +2,9 @@ from unittest import TestCase from unittest.mock import MagicMock, Mock, patch +from pysnmp.entity.engine import SnmpEngine +from pysnmp.smi import view + from splunk_connect_for_snmp.snmp.manager import Poller @@ -22,6 +25,9 @@ def test_multiple_metrics_single_group( m_resolved, ): poller = Poller.__new__(Poller) + poller.snmpEngine = SnmpEngine() + poller.builder = poller.snmpEngine.get_mib_builder() + poller.mib_view_controller = view.MibViewController(poller.builder) m_resolved.return_value = True m_get_group_key.return_value = "QWERTYUIOP" @@ -36,15 +42,15 @@ def test_multiple_metrics_single_group( varbind_mock2_1 = Mock() varbind_mock2_2 = Mock() - varbind_mock1_1.getMibSymbol.return_value = "IF-MIB", "some_metric", 1 + varbind_mock1_1.get_mib_symbol.return_value = "IF-MIB", "some_metric", 1 varbind_mock1_1.prettyPrint.return_value = "some text" - varbind_mock1_1.getOid.return_value = "1.2.3.4.5.6.7" + varbind_mock1_1.get_oid.return_value = "1.2.3.4.5.6.7" varbind_mock1_2.prettyPrint.return_value = 65 - varbind_mock2_1.getMibSymbol.return_value = "UDP-MIB", "next_metric", 1 + varbind_mock2_1.get_mib_symbol.return_value = "UDP-MIB", "next_metric", 1 varbind_mock2_1.prettyPrint.return_value = "some text2" - varbind_mock2_1.getOid.return_value = "9.8.7.6" + varbind_mock2_1.get_oid.return_value = "9.8.7.6" varbind_mock2_2.prettyPrint.return_value = 123 @@ -97,6 +103,9 @@ def test_multiple_metrics_multiple_groups( m_resolved, ): poller = Poller.__new__(Poller) + poller.snmpEngine = SnmpEngine() + poller.builder = poller.snmpEngine.get_mib_builder() + poller.mib_view_controller = view.MibViewController(poller.builder) m_resolved.return_value = True m_get_group_key.side_effect = ["GROUP1", "GROUP2"] @@ -111,15 +120,15 @@ def test_multiple_metrics_multiple_groups( varbind_mock2_1 = Mock() varbind_mock2_2 = Mock() - varbind_mock1_1.getMibSymbol.return_value = "IF-MIB", "some_metric", 1 + varbind_mock1_1.get_mib_symbol.return_value = "IF-MIB", "some_metric", 1 
varbind_mock1_1.prettyPrint.return_value = "some text" - varbind_mock1_1.getOid.return_value = "1.2.3.4.5.6.7" + varbind_mock1_1.get_oid.return_value = "1.2.3.4.5.6.7" varbind_mock1_2.prettyPrint.return_value = 65 - varbind_mock2_1.getMibSymbol.return_value = "UDP-MIB", "next_metric", 1 + varbind_mock2_1.get_mib_symbol.return_value = "UDP-MIB", "next_metric", 1 varbind_mock2_1.prettyPrint.return_value = "some text2" - varbind_mock2_1.getOid.return_value = "9.8.7.6" + varbind_mock2_1.get_oid.return_value = "9.8.7.6" varbind_mock2_2.prettyPrint.return_value = 123 @@ -192,15 +201,15 @@ def test_metrics_and_fields( varbind_mock2_1 = Mock() varbind_mock2_2 = Mock() - varbind_mock1_1.getMibSymbol.return_value = "IF-MIB", "some_metric", 1 + varbind_mock1_1.get_mib_symbol.return_value = "IF-MIB", "some_metric", 1 varbind_mock1_1.prettyPrint.return_value = "some text" - varbind_mock1_1.getOid.return_value = "1.2.3.4.5.6.7" + varbind_mock1_1.get_oid.return_value = "1.2.3.4.5.6.7" varbind_mock1_2.prettyPrint.return_value = 65 - varbind_mock2_1.getMibSymbol.return_value = "UDP-MIB", "some_field", 1 + varbind_mock2_1.get_mib_symbol.return_value = "UDP-MIB", "some_field", 1 varbind_mock2_1.prettyPrint.return_value = "some text2" - varbind_mock2_1.getOid.return_value = "9.8.7.6" + varbind_mock2_1.get_oid.return_value = "9.8.7.6" varbind_mock2_2.prettyPrint.return_value = "up and running" @@ -254,6 +263,9 @@ def test_metrics_with_profile( m_resolved, ): poller = Poller.__new__(Poller) + poller.snmpEngine = SnmpEngine() + poller.builder = poller.snmpEngine.get_mib_builder() + poller.mib_view_controller = view.MibViewController(poller.builder) m_resolved.return_value = True m_get_group_key.return_value = "QWERTYUIOP" @@ -268,15 +280,15 @@ def test_metrics_with_profile( varbind_mock2_1 = Mock() varbind_mock2_2 = Mock() - varbind_mock1_1.getMibSymbol.return_value = "IF-MIB", "some_metric", 1 + varbind_mock1_1.get_mib_symbol.return_value = "IF-MIB", "some_metric", 1 varbind_mock1_1.prettyPrint.return_value = "some text" - varbind_mock1_1.getOid.return_value = "1.2.3.4.5.6.7" + varbind_mock1_1.get_oid.return_value = "1.2.3.4.5.6.7" varbind_mock1_2.prettyPrint.return_value = 65 - varbind_mock2_1.getMibSymbol.return_value = "UDP-MIB", "next_metric", 1 + varbind_mock2_1.get_mib_symbol.return_value = "UDP-MIB", "next_metric", 1 varbind_mock2_1.prettyPrint.return_value = "some text2" - varbind_mock2_1.getOid.return_value = "9.8.7.6" + varbind_mock2_1.get_oid.return_value = "9.8.7.6" varbind_mock2_2.prettyPrint.return_value = 123 diff --git a/test/snmp/test_tasks.py b/test/snmp/test_tasks.py index 13f641e04..b32582438 100644 --- a/test/snmp/test_tasks.py +++ b/test/snmp/test_tasks.py @@ -1,5 +1,5 @@ from unittest import TestCase -from unittest.mock import MagicMock, patch +from unittest.mock import AsyncMock, MagicMock, patch from pysnmp.smi.error import SmiError @@ -8,7 +8,9 @@ class TestTasks(TestCase): @patch("splunk_connect_for_snmp.snmp.manager.get_inventory") @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") - @patch("splunk_connect_for_snmp.snmp.manager.Poller.do_work") + @patch( + "splunk_connect_for_snmp.snmp.manager.Poller.do_work", new_callable=AsyncMock + ) @patch("time.time") def test_walk( self, @@ -42,7 +44,9 @@ def test_walk( @patch("splunk_connect_for_snmp.snmp.manager.get_inventory") @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") - @patch("splunk_connect_for_snmp.snmp.manager.Poller.do_work") + @patch( + "splunk_connect_for_snmp.snmp.manager.Poller.do_work", 
new_callable=AsyncMock + ) @patch("time.time") def test_poll_with_group( self, @@ -81,7 +85,9 @@ def test_poll_with_group( @patch("splunk_connect_for_snmp.snmp.manager.get_inventory") @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") - @patch("splunk_connect_for_snmp.snmp.manager.Poller.do_work") + @patch( + "splunk_connect_for_snmp.snmp.manager.Poller.do_work", new_callable=AsyncMock + ) @patch("time.time") def test_walk_with_group( self, @@ -117,7 +123,7 @@ def test_walk_with_group( result, ) - @patch("pysnmp.smi.rfc1902.ObjectType.resolveWithMib") + @patch("pysnmp.smi.rfc1902.ObjectType.resolve_with_mib") @patch("splunk_connect_for_snmp.snmp.manager.Poller.process_snmp_data") @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") @patch("time.time") @@ -154,7 +160,7 @@ def test_trap( result, ) - @patch("pysnmp.smi.rfc1902.ObjectType.resolveWithMib") + @patch("pysnmp.smi.rfc1902.ObjectType.resolve_with_mib") @patch("splunk_connect_for_snmp.snmp.manager.Poller.process_snmp_data") @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") @patch("time.time") @@ -196,7 +202,7 @@ def test_trap_with_context_engine_id( result, ) - @patch("pysnmp.smi.rfc1902.ObjectType.resolveWithMib") + @patch("pysnmp.smi.rfc1902.ObjectType.resolve_with_mib") @patch("splunk_connect_for_snmp.snmp.manager.Poller.process_snmp_data") @patch("splunk_connect_for_snmp.snmp.manager.Poller.is_mib_known") @patch("splunk_connect_for_snmp.snmp.manager.Poller.load_mibs") @@ -243,7 +249,7 @@ def test_trap_retry_translation( result, ) - @patch("pysnmp.smi.rfc1902.ObjectType.resolveWithMib") + @patch("pysnmp.smi.rfc1902.ObjectType.resolve_with_mib") @patch("splunk_connect_for_snmp.snmp.manager.Poller.process_snmp_data") @patch("splunk_connect_for_snmp.snmp.manager.Poller.is_mib_known") @patch("splunk_connect_for_snmp.snmp.manager.Poller.load_mibs") @@ -293,7 +299,7 @@ def test_trap_retry_translation_failed( @patch("splunk_connect_for_snmp.snmp.tasks.RESOLVE_TRAP_ADDRESS", "true") @patch("splunk_connect_for_snmp.snmp.tasks.resolve_address") - @patch("pysnmp.smi.rfc1902.ObjectType.resolveWithMib") + @patch("pysnmp.smi.rfc1902.ObjectType.resolve_with_mib") @patch("splunk_connect_for_snmp.snmp.manager.Poller.process_snmp_data") @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") @patch("time.time") diff --git a/test/snmp/test_utils.py b/test/snmp/test_utils.py index 095f2ddfe..5498c0c4f 100644 --- a/test/snmp/test_utils.py +++ b/test/snmp/test_utils.py @@ -142,7 +142,7 @@ def test_get_context_data(self): result = get_context_data() self.assertIsNone(result.contextEngineId) - self.assertEqual("", result.contextName) + self.assertEqual(b"", result.contextName) def test_return_address_and_port(self): self.assertEqual(return_address_and_port("127.0.0.1"), ("127.0.0.1", 161)) diff --git a/test/test_walk.py b/test/test_walk.py index 87eaceea1..f220e4080 100644 --- a/test/test_walk.py +++ b/test/test_walk.py @@ -1,5 +1,5 @@ -from unittest import TestCase -from unittest.mock import mock_open, patch +from unittest import IsolatedAsyncioTestCase +from unittest.mock import AsyncMock, mock_open, patch from splunk_connect_for_snmp.walk import run_walk @@ -9,18 +9,20 @@ 192.178.0.1,,2c,public,,,1804,test_1,True,False""" -class TestWalk(TestCase): +class TestWalk(IsolatedAsyncioTestCase): @patch("builtins.open", new_callable=mock_open, read_data=mock_inventory) @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") - @patch("splunk_connect_for_snmp.snmp.manager.Poller.do_work") + @patch( + 
"splunk_connect_for_snmp.snmp.manager.Poller.do_work", new_callable=AsyncMock + ) @patch( "splunk_connect_for_snmp.common.collection_manager.ProfilesManager.return_collection" ) - def test_run_walk(self, m_load_profiles, m_do_work, m_init, m_open): + async def test_run_walk(self, m_load_profiles, m_do_work, m_init, m_open): m_init.return_value = None m_do_work.return_value = (False, {}) - run_walk() + await run_walk() calls = m_do_work.call_args_list @@ -34,15 +36,17 @@ def test_run_walk(self, m_load_profiles, m_do_work, m_init, m_open): @patch("builtins.open", new_callable=mock_open, read_data=mock_inventory) @patch("splunk_connect_for_snmp.snmp.manager.Poller.__init__") - @patch("splunk_connect_for_snmp.snmp.manager.Poller.do_work") + @patch( + "splunk_connect_for_snmp.snmp.manager.Poller.do_work", new_callable=AsyncMock + ) @patch( "splunk_connect_for_snmp.common.collection_manager.ProfilesManager.return_collection" ) - def test_run_walk_exception(self, m_load_profiles, m_do_work, m_init, m_open): + async def test_run_walk_exception(self, m_load_profiles, m_do_work, m_init, m_open): m_init.return_value = None m_do_work.side_effect = (Exception("Boom!"), (False, {})) - run_walk() + await run_walk() calls = m_do_work.call_args_list