diff --git a/.flake8 b/.flake8 index e21fafd..317d19b 100644 --- a/.flake8 +++ b/.flake8 @@ -2,5 +2,12 @@ # F401 'module' imported but unused # E501 line too long (83 > 79 characters) +# F841 local variable 'pc' is assigned to but never used exclude = .git,.pycache,build,.eggs + +per-file-ignores = + ./tests/cloudevent_receiver_server.py: E501 + # remove when implemented: + ./src/actinia_cloudevent_plugin/api/cloudevent.py: E501 + ./src/actinia_cloudevent_plugin/core/processing.py: F841, E501 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9e458e2..08d36e7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -11,28 +11,28 @@ on: jobs: - unittests-G84: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Replace run only unittest command - run: | - sed -i "s+# RUN make test+RUN make unittest+g" docker/actinia-cloudevent-plugin-test/Dockerfile - - name: Unittests of actinia-cloudevent-plugin - id: docker_build - uses: docker/build-push-action@v6 - with: - push: false - tags: actinia-cloudevent-plugin-tests:alpine - context: . - file: docker/actinia-cloudevent-plugin-test/Dockerfile - no-cache: true - # pull: true + # unittests: + # runs-on: ubuntu-latest + # steps: + # - name: Checkout + # uses: actions/checkout@v4 + # - name: Set up Docker Buildx + # uses: docker/setup-buildx-action@v3 + # - name: Replace run only unittest command + # run: | + # sed -i "s+# RUN make test+RUN make unittest+g" docker/Dockerfile + # - name: Unittests of actinia-cloudevent-plugin + # id: docker_build + # uses: docker/build-push-action@v6 + # with: + # push: false + # tags: actinia-cloudevent-plugin-tests:alpine + # context: . + # file: docker/Dockerfile + # no-cache: true + # # pull: true - integration-tests-G84: + integration-tests: runs-on: ubuntu-latest steps: - name: Checkout @@ -43,57 +43,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Replace run integration test command run: | - sed -i "s+# RUN make test+RUN make integrationtest+g" docker/actinia-cloudevent-plugin-test/Dockerfile - - name: Integration tests of actinia-cloudevent-plugin - id: docker_build - uses: docker/build-push-action@v6 - with: - push: false - tags: actinia-cloudevent-plugin-test:alpine - context: . - file: docker/actinia-cloudevent-plugin-test/Dockerfile - no-cache: true - # pull: true - - unittests-G83: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Replace actinia version (including GRASS version) - run: | - sed -i "s+mundialis/actinia:latest+mundialis/actinia:grass8.3+g" docker/actinia-cloudevent-plugin-test/Dockerfile - - name: Replace run only unittest command - run: | - sed -i "s+# RUN make test+RUN make unittest+g" docker/actinia-cloudevent-plugin-test/Dockerfile - - name: Unittests of actinia-cloudevent-plugin - id: docker_build - uses: docker/build-push-action@v6 - with: - push: false - tags: actinia-cloudevent-plugin-tests:alpine - context: . - file: docker/actinia-cloudevent-plugin-test/Dockerfile - no-cache: true - # pull: true - - integration-tests-G83: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - # with: - # path: "." 
- - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Replace actinia version (including GRASS version) - run: | - sed -i "s+mundialis/actinia:latest+mundialis/actinia:grass8.3+g" docker/actinia-cloudevent-plugin-test/Dockerfile - - name: Replace run integration test command - run: | - sed -i "s+# RUN make test+RUN make integrationtest+g" docker/actinia-cloudevent-plugin-test/Dockerfile + sed -i "s+# RUN make test+RUN make integrationtest+g" docker/Dockerfile - name: Integration tests of actinia-cloudevent-plugin id: docker_build uses: docker/build-push-action@v6 @@ -101,6 +51,6 @@ jobs: push: false tags: actinia-cloudevent-plugin-test:alpine context: . - file: docker/actinia-cloudevent-plugin-test/Dockerfile + file: docker/Dockerfile no-cache: true # pull: true diff --git a/.gitignore b/.gitignore index 3854875..4ee5083 100644 --- a/.gitignore +++ b/.gitignore @@ -48,3 +48,6 @@ docker/valkey_data/dump.rdb .pylintrc_allowed_to_fail ruff-github-workflows.toml ruff-merged.toml + +# Development env +config/*.cfg diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..6c9cd83 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,53 @@ +# For documentation about this config, see +# https://pylint.readthedocs.io/en/stable/user_guide/configuration/all-options.html (as of writing, version 2.17.4) + +[MAIN] + +exit-zero=yes + +jobs=0 # Default: 1 + +load-plugins= + pylint.extensions.broad_try_clause + +recursive=yes # Default: False + +# score=no # Default: True + +[BROAD_TRY_CLAUSE] +max-try-statements=4 # Default: 1 + +[FORMAT] +max-line-length=80 # Default: 100 +max-module-lines=800 # Default: 1000 +ignore-long-lines=.*COPYRIGHT:.* |# . description:.*|\s*(# )?.*http.:\/\/\S+?|# %%* + +[MESSAGES CONTROL] +# C0301: Line too long (already captured by other linters) +disable=C0301 + +[VARIABLES] +additional-builtins=_ # Default: () + + +; [DESIGN] + +; # Maximum number of arguments for function / method. +; # Default: 5 +; max-args=9 + +; # Maximum number of attributes for a class (see R0902). +; # Default: 7 +; max-attributes=11 + +; # Maximum number of branch for function / method body. +; # Default: 12 +; max-branches=15 + +; # Maximum number of locals for function / method body. +; # Default: 15 +; max-locals=19 + +; # Maximum number of return / yield for function / method body. +; # Default: 6 +; max-returns=11 diff --git a/Makefile b/Makefile index a5e78a5..ac19065 100644 --- a/Makefile +++ b/Makefile @@ -19,13 +19,13 @@ dist: python3 setup.py dist test: - ./tests_with_kvdb.sh + ./tests_with_cloudevent_receiver.sh unittest: - ./tests_with_kvdb.sh unittest + ./tests_with_cloudevent_receiver.sh unittest devtest: - ./tests_with_kvdb.sh dev + ./tests_with_cloudevent_receiver.sh dev integrationtest: - ./tests_with_kvdb.sh integrationtest + ./tests_with_cloudevent_receiver.sh integrationtest diff --git a/README.md b/README.md index c1441b1..5b72c2c 100644 --- a/README.md +++ b/README.md @@ -1,76 +1,90 @@ # actinia-cloudevent-plugin -This is an plugin for [actinia-core](https://github.com/mundialis/actinia_core) which adds cloudevent endpoints to actinia-core. +This is a plugin for [actinia-core](https://github.com/mundialis/actinia_core) which adds cloudevent endpoints and runs as standalone app. -You can run actinia-cloudevent-plugin as an actinia-core plugin. 
+## Installation and Setup -## Installation Use docker-compose for installation: ```bash docker compose -f docker/docker-compose.yml build -docker compose -f docker/docker-compose.yml up -d +docker compose -f docker/docker-compose.yml run --rm --service-ports --entrypoint sh actinia-cloudevent +# within docker +gunicorn -b 0.0.0.0:5000 -w 8 --access-logfile=- -k gthread actinia_cloudevent_plugin.main:flask_app +``` + +### DEV setup +```bash +# Uncomment the volume mount of the cloud-event-plugin within docker/docker-compose.yml, +# then: +docker compose -f docker/docker-compose.yml build +docker compose -f docker/docker-compose.yml run --rm --service-ports --entrypoint sh actinia-cloudevent +# within docker: +# install the plugin +pip3 install . +# start flask app with actinia-cloudevent-plugin +python3 -m actinia_cloudevent_plugin.main ``` ### Installation hints * If you get an error like: `ERROR: for docker_kvdb_1 Cannot start service valkey: network xxx not found` you can try the following: ```bash -docker compose -f docker/docker-compose.yml down +docker compose -f docker/docker-compose-dev.yml down # remove all custom networks not used by a container docker network prune -docker compose -f docker/docker-compose.yml up -d +docker compose -f docker/docker-compose-dev.yml up -d ``` -### Requesting helloworld endpoint -You can test the plugin and request the `/helloworld` endpoint, e.g. with: -```bash -curl -u actinia-gdi:actinia-gdi -X GET http://localhost:8088/api/v3/helloworld | jq +## Configuration -curl -u actinia-gdi:actinia-gdi -H 'accept: application/json' -H 'Content-Type: application/json' -X POST http://localhost:8088/api/v3/helloworld -d '{"name": "test"}' | jq -``` +- the URL of the cloudevent receiver is defined within [config/mount/sample.ini](config/mount/sample.ini): `[EVENTRECEIVER]` (Default value defined within [src/actinia_cloudevent_plugin/resources/config.py](src/actinia_cloudevent_plugin/resources/config.py)) + +## Requesting endpoint -## DEV setup -For a DEV setup you can use the docker/docker-compose.yml: +**Note**: Assuming cloudevent-plugin is running as described in previous setup. + +You can test the plugin and request the `/` endpoint, e.g. with: ```bash -docker compose -f docker/docker-compose.yml build -docker compose -f docker/docker-compose.yml run --rm --service-ports --entrypoint sh actinia +# Start server for receiving of cloudevents (which are returned as response) +# NOTE: as defined within config/mount/sample.ini: [EVENTRECEIVER] +python3 tests/cloudevent_receiver_server.py -# install the plugin -(cd /src/actinia-cloudevent-plugin && python3 setup.py install) -# start actinia-core with your plugin -sh /src/start.sh -# gunicorn -b 0.0.0.0:8088 -w 1 --access-logfile=- -k gthread actinia_core.main:flask_app +# In another terminal +JSON=tests/examples/cloudevent_example.json +curl -X POST -H 'Content-Type: application/json' --data @$JSON localhost:5000/api/v1/ | jq ``` -### Hints +Exemplary returned cloudevent: [tests/examples/cloudevent_example_return.json](tests/examples/cloudevent_example_return.json) + +## Hints * If you have no `.git` folder in the plugin folder, you need to set the `SETUPTOOLS_SCM_PRETEND_VERSION` before installing the plugin: -```bash -export SETUPTOOLS_SCM_PRETEND_VERSION=0.0 -``` -Otherwise you will get an error like this -`LookupError: setuptools-scm was unable to detect version for '/src/actinia-cloudevent-plugin'.`. 
+ ```bash + export SETUPTOOLS_SCM_PRETEND_VERSION=0.0 + ``` + Otherwise you will get an error like this `LookupError: setuptools-scm was unable to detect version for '/src/actinia-cloudevent-plugin'.`. * If you make changes in code and nothing changes you can try to uninstall the plugin: -```bash -pip3 uninstall actinia-cloudevent-plugin.wsgi -y -rm -rf /usr/lib/python3.8/site-packages/actinia_cloudevent_plugin.wsgi-*.egg -``` + ```bash + pip3 uninstall actinia-cloudevent-plugin.wsgi -y + rm -rf /usr/lib/python3.8/site-packages/actinia_cloudevent_plugin.wsgi-*.egg + ``` -### Running tests +## Running tests You can run the tests in the actinia test docker: ```bash -docker build -f docker/actinia-cloudevent-plugin-test/Dockerfile -t actinia-cloudevent-plugin-test . -docker run -it actinia-cloudevent-plugin-test -i - -cd /src/actinia-cloudevent-plugin/ +# Uncomment the volume mount of the cloud-event-plugin within docker/docker-compose.yml, +# then: +docker compose -f docker/docker-compose.yml build +docker compose -f docker/docker-compose.yml run --rm --service-ports --entrypoint sh actinia-cloudevent # run all tests make test -# run only unittests -make unittest +# # run only unittests +# make unittest + # run only integrationtests make integrationtest diff --git a/config/mount/sample.ini b/config/mount/sample.ini new file mode 100644 index 0000000..063605a --- /dev/null +++ b/config/mount/sample.ini @@ -0,0 +1,7 @@ +[EVENTRECEIVER] +url = http://localhost:3000/ + +[LOGCONFIG] +logfile = actinia-cloudevent-plugin.log +level = DEBUG +type = json diff --git a/docker/Dockerfile b/docker/Dockerfile index 56f50e1..d53eca4 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,19 +1,24 @@ -# Exception for hadolint-docker-linter: -# DL3007: using latest is prone to errors if the image will ever update. Pin the version explicitly to a release tag -# hadolint ignore=DL3007 -FROM mundialis/actinia:latest +FROM alpine:3.21 -# pwgen is needed for the tests -RUN pip3 install --no-cache-dir pwgen==0.8.2.post0 +# python3 + pip3 +# hadolint ignore=DL3018 +RUN apk update; \ + apk add --no-cache python3 python3-dev make +ENV PATH="/opt/venv/bin:$PATH" +RUN /usr/bin/python -m venv --system-site-packages --without-pip /opt/venv +# hadolint ignore=DL3013 +RUN python -m ensurepip && pip3 install --no-cache-dir --upgrade pip pep517 wheel -COPY docker/actinia.cfg /etc/default/actinia -COPY src /src/actinia-cloudevent-plugin/src/ -COPY setup.cfg /src/actinia-cloudevent-plugin/ -COPY setup.py /src/actinia-cloudevent-plugin/ -COPY requirements.txt /src/actinia-cloudevent-plugin/ +# gunicorn +# hadolint ignore=DL3013 +RUN pip3 install --no-cache-dir gunicorn + +# needed for tests +# hadolint ignore=DL3013 +RUN pip3 install --no-cache-dir setuptools pwgen==0.8.2.post0 pytest==8.3.5 pytest-cov==6.0.0 + +COPY . /src/actinia-cloudevent-plugin/ -RUN pip3 install --no-cache-dir -r /src/actinia-cloudevent-plugin/requirements.txt && \ - pip3 uninstall actinia-cloudevent-plugin.wsgi -y # SETUPTOOLS_SCM_PRETEND_VERSION is only needed if in the plugin folder is no # .git folder ENV SETUPTOOLS_SCM_PRETEND_VERSION=0.0 @@ -21,4 +26,6 @@ ENV SETUPTOOLS_SCM_PRETEND_VERSION=0.0 WORKDIR /src/actinia-cloudevent-plugin RUN pip3 install --no-cache-dir -e . 
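The new `config/mount/sample.ini` above introduces the `[EVENTRECEIVER]` section from which the plugin takes its receiver URL. As a rough illustration of how such a section can be consumed with `configparser` — the plugin's real loader lives in `src/actinia_cloudevent_plugin/resources/config.py`, which is not part of this diff; the helper name and fallback below are assumptions for the sketch:

```python
# Sketch only: reading [EVENTRECEIVER] from an ini file with configparser.
# The fallback mirrors the url value shipped in config/mount/sample.ini;
# read_receiver_url() is a hypothetical helper, not the plugin's actual API.
from configparser import ConfigParser


def read_receiver_url(path: str = "config/mount/sample.ini") -> str:
    parser = ConfigParser()
    parser.read(path)  # silently skips missing files
    return parser.get("EVENTRECEIVER", "url", fallback="http://localhost:3000/")


if __name__ == "__main__":
    print(read_receiver_url())
```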
-WORKDIR /src/actinia_core +# For tests: +RUN chmod a+x tests_with_cloudevent_receiver.sh && make install +# RUN make test diff --git a/docker/actinia-cloudevent-plugin-test/Dockerfile b/docker/actinia-cloudevent-plugin-test/Dockerfile deleted file mode 100644 index f1da0ce..0000000 --- a/docker/actinia-cloudevent-plugin-test/Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -# Exception for hadolint-docker-linter: -# DL3007: using latest is prone to errors if the image will ever update. Pin the version explicitly to a release tag -# hadolint ignore=DL3007 -FROM mundialis/actinia:latest AS actinia_test - -LABEL authors="Carmen Tawalika,Anika Weinmann" -LABEL maintainer="tawalika@mundialis.de,weinmann@mundialis.de" - -ENV ACTINIA_CUSTOM_TEST_CFG=/etc/default/actinia-cloudevent-plugin-test - -# TODO do not set DEFAULT_CONFIG_PATH if this is fixed -ENV DEFAULT_CONFIG_PATH=/etc/default/actinia-cloudevent-plugin-test - -# install things only for tests -# DL3018: Pin versions in apk add -# hadolint ignore=DL3018 -RUN apk add --no-cache valkey && \ - pip3 install --no-cache-dir iniconfig==2.0.0 colorlog==6.8.2 pwgen==0.8.2.post0 pytest==8.3.5 pytest-cov==6.0.0 - -# COPY docker/actinia-cloudevent-plugin-test/start.sh /src/start.sh - -ENTRYPOINT ["/bin/sh"] -CMD ["/src/start.sh"] - -# # add data for tests -# RUN wget --quiet https://grass.osgeo.org/sampledata/north_carolina/nc_spm_08_micro.zip && \ -# unzip nc_spm_08_micro.zip && \ -# rm -f nc_spm_08_micro.zip && \ -# mv nc_spm_08_micro /actinia_core/grassdb/nc_spm_08 -# RUN grass -e -c 'EPSG:4326' /actinia_core/grassdb/latlong_wgs84 - -# copy needed files and configs for test -COPY docker/actinia-cloudevent-plugin-test/actinia-cloudevent-plugin-test.cfg /etc/default/actinia -COPY docker/actinia-cloudevent-plugin-test/actinia-cloudevent-plugin-test.cfg /etc/default/actinia-cloudevent-plugin-test -COPY . /src/actinia-cloudevent-plugin/ - -WORKDIR /src/actinia-cloudevent-plugin/ -RUN pip3 install --no-cache-dir -e . 
- -RUN chmod a+x tests_with_kvdb.sh && make install - -# RUN make test diff --git a/docker/actinia-cloudevent-plugin-test/actinia-cloudevent-plugin-test.cfg b/docker/actinia-cloudevent-plugin-test/actinia-cloudevent-plugin-test.cfg deleted file mode 100644 index 0a30eee..0000000 --- a/docker/actinia-cloudevent-plugin-test/actinia-cloudevent-plugin-test.cfg +++ /dev/null @@ -1,24 +0,0 @@ -[GRASS] -grass_database = /actinia_core/grassdb -grass_user_database = /actinia_core/userdata -grass_tmp_database = /actinia_core/workspace/temp_db -grass_resource_dir = /actinia_core/resources -grass_gis_base = /usr/local/grass -grass_gis_start_script = /usr/local/bin/grass -grass_addon_path = /root/.grass8/addons/ - -[API] -plugins = ["actinia_cloudevent_plugin"] -force_https_urls = True - -[KVDB] -kvdb_server_url = localhost -kvdb_server_port = 6379 -worker_queue_name = actinia_job -worker_logfile = /actinia_core/workspace/tmp/actinia_worker_test.log - -[MISC] -tmp_workdir = /actinia_core/workspace/tmp -download_cache = /actinia_core/workspace/download_cache -secret_key = token_signing_key_changeme -save_interim_results = False diff --git a/docker/actinia.cfg b/docker/actinia.cfg deleted file mode 100644 index 69d9aef..0000000 --- a/docker/actinia.cfg +++ /dev/null @@ -1,28 +0,0 @@ -[GRASS] -grass_database = /actinia_core/grassdb -grass_user_database = /actinia_core/userdata -grass_tmp_database = /actinia_core/workspace/temp_db -grass_resource_dir = /actinia_core/resources -grass_gis_base = /usr/local/grass -grass_gis_start_script = /usr/local/bin/grass -grass_addon_path = /root/.grass8/addons/ - -[API] -plugins = ["actinia_cloudevent_plugin"] -force_https_urls = False - -[KVDB] -kvdb_server_url = valkey -kvdb_server_pw = pass -kvdb_resource_expire_time = 864001 -worker_logfile = /actinia_core/workspace/tmp/actinia_worker.log - -[LOGGING] -log_stdout_format = colored -log_level = 3 - -[MISC] -tmp_workdir = /actinia_core/workspace/tmp -download_cache = /actinia_core/workspace/download_cache -secret_key = token_signing_key_changeme -save_interim_results = True diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 190fd83..39b7c70 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,42 +1,14 @@ -version: "3" services: - actinia: + actinia-cloudevent: build: context: .. dockerfile: docker/Dockerfile - volumes: - - ..:/src/actinia-cloudevent-plugin/. - ports: - - "8088:8088" - depends_on: - - valkey + # -- For dev-setup and/or tests mount plugin src code: + # volumes: + # - ..:/src/actinia-cloudevent-plugin/. cap_add: - SYS_PTRACE - networks: - - actinia - - valkey: - image: valkey/valkey:8.1-alpine - volumes: - - ./valkey_data:/data - environment: - - VALKEY_PASS_FILE=/data/config/.valkey - command: [ - "sh", "-c", - ' - docker-entrypoint.sh - "/data/config/valkey.conf" - --requirepass "$$(cat $$VALKEY_PASS_FILE)" - ' - ] ports: - - "6379:6379" - networks: - - actinia - -networks: - actinia: - ipam: - config: - - subnet: 172.18.0.0/16 + - "5000:5000" + network_mode: "host" diff --git a/docker/valkey_data/config/.valkey b/docker/valkey_data/config/.valkey deleted file mode 100644 index 2ae2839..0000000 --- a/docker/valkey_data/config/.valkey +++ /dev/null @@ -1 +0,0 @@ -pass diff --git a/docker/valkey_data/config/valkey.conf b/docker/valkey_data/config/valkey.conf deleted file mode 100644 index fb0203d..0000000 --- a/docker/valkey_data/config/valkey.conf +++ /dev/null @@ -1,2420 +0,0 @@ -# Valkey configuration file example. 
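With the valkey service removed, the only external piece the plugin still talks to is the cloudevent receiver configured under `[EVENTRECEIVER]` (`http://localhost:3000/` in `sample.ini`). The repository provides `tests/cloudevent_receiver_server.py` for this, whose contents are not shown in this diff; the Flask stub below is only a hypothetical stand-in to illustrate the contract — accept a POSTed cloudevent and return it as the response, as described in the README:

```python
# Hypothetical stand-in for tests/cloudevent_receiver_server.py (not shown in
# this diff): listen on the [EVENTRECEIVER] url from config/mount/sample.ini
# and return the received cloudevent as the response.
from flask import Flask, jsonify, request

app = Flask(__name__)


@app.route("/", methods=["POST"])
def receive_cloudevent():
    event = request.get_json(force=True)  # structured-mode cloudevent body
    app.logger.info("received cloudevent: %s", event)
    return jsonify(event), 200


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=3000)
```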
-# * geo - Data type: geo related. -# * stream - Data type: streams related. -# -# For more information about ACL configuration please refer to -# the Valkey web site at https://valkey.io/topics/acl - -# ACL LOG -# -# The ACL Log tracks failed commands and authentication events associated -# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked -# by ACLs. The ACL Log is stored in memory. You can reclaim memory with -# ACL LOG RESET. Define the maximum entry length of the ACL Log below. -acllog-max-len 128 - -# Using an external ACL file -# -# Instead of configuring users here in this file, it is possible to use -# a stand-alone file just listing users. The two methods cannot be mixed: -# if you configure users here and at the same time you activate the external -# ACL file, the server will refuse to start. -# -# The format of the external ACL user file is exactly the same as the -# format that is used inside valkey.conf to describe users. -# -# aclfile /etc/valkey/users.acl - -# IMPORTANT NOTE: "requirepass" is just a compatibility -# layer on top of the new ACL system. The option effect will be just setting -# the password for the default user. Clients will still authenticate using -# AUTH as usually, or more explicitly with AUTH default -# if they follow the new protocol: both will work. -# -# The requirepass is not compatible with aclfile option and the ACL LOAD -# command, these will cause requirepass to be ignored. -# -# requirepass foobared - -# The default Pub/Sub channels permission for new users is controlled by the -# acl-pubsub-default configuration directive, which accepts one of these values: -# -# allchannels: grants access to all Pub/Sub channels -# resetchannels: revokes access to all Pub/Sub channels -# -# acl-pubsub-default defaults to 'resetchannels' permission. -# -# acl-pubsub-default resetchannels - -# Command renaming (DEPRECATED). -# -# ------------------------------------------------------------------------ -# WARNING: avoid using this option if possible. Instead use ACLs to remove -# commands from the default user, and put them only in some admin user you -# create for administrative purposes. -# ------------------------------------------------------------------------ -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to replicas may cause problems. - -################################### CLIENTS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as the server reserves a few file descriptors for internal uses). -# -# Once the limit is reached the server will close all the new connections sending -# an error 'max number of clients reached'. 
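Illustrative aside, recapping the ACL rule syntax documented above before the client limits continue: the sketch below creates the "worker" user from the earlier example. It assumes the redis-py client and sends raw ACL commands, so no client-specific helpers are relied on.

    import redis

    r = redis.Redis(host="localhost", port=6379, decode_responses=True)

    # Same rule set as the example above: enabled, one password, keys jobs:*,
    # and the @list / @connection command categories.
    r.execute_command(
        "ACL", "SETUSER", "worker",
        "on", ">ffa9203c493aa99", "~jobs:*", "+@list", "+@connection",
    )
    print(r.execute_command("ACL", "GETUSER", "worker"))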
-# -# IMPORTANT: With a cluster-enabled setup, the max number of connections is also -# shared with the cluster bus: every node in the cluster will use two -# connections, one incoming and another outgoing. It is important to size the -# limit accordingly in case of very large clusters. -# -# maxclients 10000 - -############################## MEMORY MANAGEMENT ################################ - -# Set a memory usage limit to the specified amount of bytes. -# When the memory limit is reached the server will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If the server can't remove keys according to the policy, or if the policy is -# set to 'noeviction', the server will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using the server as an LRU or LFU cache, or to -# set a hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have replicas attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the replicas are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of replicas is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have replicas attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for replica -# output buffers (but this is not needed if the policy is 'noeviction'). -# -# maxmemory - -# MAXMEMORY POLICY: how the server will select what to remove when maxmemory -# is reached. You can select one from the following behaviors: -# -# volatile-lru -> Evict using approximated LRU, only keys with an expire set. -# allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. -# allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key having an expire set. -# allkeys-random -> Remove a random key, any key. -# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) -# noeviction -> Don't evict anything, just return an error on write operations. -# -# LRU means Least Recently Used -# LFU means Least Frequently Used -# -# Both LRU, LFU and volatile-ttl are implemented using approximated -# randomized algorithms. -# -# Note: with any of the above policies, when there are no suitable keys for -# eviction, the server will return an error on write operations that require -# more memory. These are usually commands that create new keys, add data or -# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE, -# SORT (due to the STORE argument), and EXEC (if the transaction includes any -# command that requires memory). -# -# The default is: -# -# maxmemory-policy noeviction - -# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can tune it for speed or -# accuracy. By default the server will check five keys and pick the one that was -# used least recently, you can change the sample size using the following -# configuration directive. -# -# The default of 5 produces good enough results. 10 Approximates very closely -# true LRU but costs more CPU. 
3 is faster but not very accurate. The maximum -# value that can be set is 64. -# -# maxmemory-samples 5 - -# Eviction processing is designed to function well with the default setting. -# If there is an unusually large amount of write traffic, this value may need to -# be increased. Decreasing this value may reduce latency at the risk of -# eviction processing effectiveness -# 0 = minimum latency, 10 = default, 100 = process without regard to latency -# -# maxmemory-eviction-tenacity 10 - -# By default a replica will ignore its maxmemory setting -# (unless it is promoted to primary after a failover or manually). It means -# that the eviction of keys will be just handled by the primary, sending the -# DEL commands to the replica as keys evict in the primary side. -# -# This behavior ensures that primaries and replicas stay consistent, and is usually -# what you want, however if your replica is writable, or you want the replica -# to have a different memory setting, and you are sure all the writes performed -# to the replica are idempotent, then you may change this default (but be sure -# to understand what you are doing). -# -# Note that since the replica by default does not evict, it may end using more -# memory than the one set via maxmemory (there are certain buffers that may -# be larger on the replica, or data structures may sometimes take more memory -# and so forth). So make sure you monitor your replicas and make sure they -# have enough memory to never hit a real out-of-memory condition before the -# primary hits the configured maxmemory setting. -# -# replica-ignore-maxmemory yes - -# The server reclaims expired keys in two ways: upon access when those keys are -# found to be expired, and also in background, in what is called the -# "active expire key". The key space is slowly and interactively scanned -# looking for expired keys to reclaim, so that it is possible to free memory -# of keys that are expired and will never be accessed again in a short time. -# -# The default effort of the expire cycle will try to avoid having more than -# ten percent of expired keys still in memory, and will try to avoid consuming -# more than 25% of total memory and to add latency to the system. However -# it is possible to increase the expire "effort" that is normally set to -# "1", to a greater value, up to the value "10". At its maximum value the -# system will use more CPU, longer cycles (and technically may introduce -# more latency), and will tolerate less already expired keys still present -# in the system. It's a tradeoff between memory, CPU and latency. -# -# active-expire-effort 1 - -############################# LAZY FREEING #################################### - -# When keys are deleted, the served has historically freed their memory using -# blocking operations. It means that the server stopped processing new commands -# in order to reclaim all the memory associated with an object in a synchronous -# way. If the key deleted is associated with a small object, the time needed -# in order to execute the DEL command is very small and comparable to most other -# O(1) or O(log_N) commands in the server. However if the key is associated with an -# aggregated value containing millions of elements, the server can block for -# a long time (even seconds) in order to complete the operation. -# -# For the above reasons, lazy freeing (or asynchronous freeing), has been -# introduced. With lazy freeing, keys are deleted in constant time. 
Another -# thread will incrementally free the object in the background as fast as -# possible. -# -# Starting from Valkey 8.0, lazy freeing is enabled by default. It is possible -# to retain the synchronous freeing behaviour by setting the lazyfree related -# configuration directives to 'no'. - -# Commands like DEL, FLUSHALL and FLUSHDB delete keys, but the server can also -# delete keys or flush the whole database as a side effect of other operations. -# Specifically the server deletes objects independently of a user call in the -# following scenarios: -# -# 1) On eviction, because of the maxmemory and maxmemory policy configurations, -# in order to make room for new data, without going over the specified -# memory limit. -# 2) Because of expire: when a key with an associated time to live (see the -# EXPIRE command) must be deleted from memory. -# 3) Because of a side effect of a command that stores data on a key that may -# already exist. For example the RENAME command may delete the old key -# content when it is replaced with another one. Similarly SUNIONSTORE -# or SORT with STORE option may delete existing keys. The SET command -# itself removes any old content of the specified key in order to replace -# it with the specified string. -# 4) During replication, when a replica performs a full resynchronization with -# its primary, the content of the whole database is removed in order to -# load the RDB file just transferred. -# -# In all the above cases, the default is to release memory in a non-blocking -# way. - -lazyfree-lazy-eviction yes -lazyfree-lazy-expire yes -lazyfree-lazy-server-del yes -replica-lazy-flush yes - -# For keys deleted using the DEL command, lazy freeing is controlled by the -# configuration directive 'lazyfree-lazy-user-del'. The default is 'yes'. The -# UNLINK command is identical to the DEL command, except that UNLINK always -# frees the memory lazily, regardless of this configuration directive: - -lazyfree-lazy-user-del yes - -# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous -# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the -# commands. When neither flag is passed, this directive will be used to determine -# if the data should be deleted asynchronously. -# -# When a replica performs a node reset via CLUSTER RESET, the entire -# database content is removed to allow the node to become an empty primary. -# This directive also determines whether the data should be deleted asynchronously. -# -# There are many problems with running flush synchronously. Even in single CPU -# environments, the thread managers should balance between the freeing and -# serving incoming requests. The default value is yes. - -lazyfree-lazy-user-flush yes - -################################ THREADED I/O ################################# - -# The server is mostly single threaded, however there are certain threaded -# operations such as UNLINK, slow I/O accesses and other things that are -# performed on side threads. -# -# Now it is also possible to handle the server clients socket reads and writes -# in different I/O threads. Since especially writing is so slow, normally -# users use pipelining in order to speed up the server performances per -# core, and spawn multiple instances in order to scale more. Using I/O -# threads it is possible to easily speedup two times the server without resorting -# to pipelining nor sharding of the instance. 
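Illustrative aside on the lazy-freeing directives above, before the I/O-threading notes continue. A minimal sketch assuming the redis-py client; the key name and size are arbitrary.

    import redis

    r = redis.Redis(host="localhost", port=6379)

    r.rpush("biglist", *range(100_000))  # an aggregate value that is costly to free

    # With lazyfree-lazy-user-del yes, DEL already frees memory in a background
    # thread; UNLINK always frees lazily, regardless of that directive.
    r.unlink("biglist")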
-# -# By default threading is disabled, we suggest enabling it only in machines -# that have at least 3 or more cores, leaving at least one spare core. -# We also recommend using threaded I/O only if you actually have performance problems, with -# instances being able to use a quite big percentage of CPU time, otherwise -# there is no point in using this feature. -# -# So for instance if you have a four cores boxes, try to use 2 or 3 I/O -# threads, if you have a 8 cores, try to use 6 threads. In order to -# enable I/O threads use the following configuration directive: -# -# io-threads 4 -# -# Setting io-threads to 1 will just use the main thread as usual. -# When I/O threads are enabled, we use threads for reads and writes, that is -# to thread the write and read syscall and transfer the client buffers to the -# socket and to enable threading of reads and protocol parsing. -# -# When multiple commands are parsed by the I/O threads and ready for execution, -# we take advantage of knowing the next set of commands and prefetch their -# required dictionary entries in a batch. This reduces memory access costs. -# -# The optimal batch size depends on the specific workflow of the user. -# The default batch size is 16, which can be modified using the -# 'prefetch-batch-max-size' config. -# -# When the config is set to 0, prefetching is disabled. -# -# prefetch-batch-max-size 16 -# -# NOTE: -# 1. The 'io-threads-do-reads' config is deprecated and has no effect. Please -# avoid using this config if possible. -# -# 2. If you want to test the server speedup using valkey-benchmark, make -# sure you also run the benchmark itself in threaded mode, using the -# --threads option to match the number of server threads, otherwise you'll not -# be able to notice the improvements. - -############################ KERNEL OOM CONTROL ############################## - -# On Linux, it is possible to hint the kernel OOM killer on what processes -# should be killed first when out of memory. -# -# Enabling this feature makes the server actively control the oom_score_adj value -# for all its processes, depending on their role. The default scores will -# attempt to have background child processes killed before all others, and -# replicas killed before primaries. -# -# The server supports these options: -# -# no: Don't make changes to oom-score-adj (default). -# yes: Alias to "relative" see below. -# absolute: Values in oom-score-adj-values are written as is to the kernel. -# relative: Values are used relative to the initial value of oom_score_adj when -# the server starts and are then clamped to a range of -1000 to 1000. -# Because typically the initial value is 0, they will often match the -# absolute values. -oom-score-adj no - -# When oom-score-adj is used, this directive controls the specific values used -# for primary, replica and background child processes. Values range -2000 to -# 2000 (higher means more likely to be killed). -# -# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) -# can freely increase their value, but not decrease it below its initial -# settings. This means that setting oom-score-adj to "relative" and setting the -# oom-score-adj-values to positive values will always succeed. 
-oom-score-adj-values 0 200 800 - - -#################### KERNEL transparent hugepage CONTROL ###################### - -# Usually the kernel Transparent Huge Pages control is set to "madvise" or -# "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which -# case this config has no effect. On systems in which it is set to "always", -# the server will attempt to disable it specifically for the server process in order -# to avoid latency problems specifically with fork(2) and CoW. -# If for some reason you prefer to keep it enabled, you can set this config to -# "no" and the kernel global to "always". - -disable-thp yes - -############################## APPEND ONLY MODE ############################### - -# By default the server asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the server process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. For instance using the default data fsync policy -# (see later in the config file) the server can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup the server will load the AOF, that is the file -# with the better durability guarantees. -# -# Note that changing this value in a config file of an existing database and -# restarting the server can lead to data loss. A conversion needs to be done -# by setting it via CONFIG command on a live server first. -# -# Please check https://valkey.io/topics/persistence for more information. - -appendonly no - -# The base name of the append only file. -# -# The server uses a set of append-only files to persist the dataset -# and changes applied to it. There are two basic types of files in use: -# -# - Base files, which are a snapshot representing the complete state of the -# dataset at the time the file was created. Base files can be either in -# the form of RDB (binary serialized) or AOF (textual commands). -# - Incremental files, which contain additional commands that were applied -# to the dataset following the previous file. -# -# In addition, manifest files are used to track the files and the order in -# which they were created and should be applied. -# -# Append-only file names are created by the server following a specific pattern. -# The file name's prefix is based on the 'appendfilename' configuration -# parameter, followed by additional information about the sequence and type. -# -# For example, if appendfilename is set to appendonly.aof, the following file -# names could be derived: -# -# - appendonly.aof.1.base.rdb as a base file. -# - appendonly.aof.1.incr.aof, appendonly.aof.2.incr.aof as incremental files. -# - appendonly.aof.manifest as a manifest file. - -appendfilename "appendonly.aof" - -# For convenience, the server stores all persistent append-only files in a dedicated -# directory. The name of the directory is determined by the appenddirname -# configuration parameter. - -appenddirname "appendonlydir" - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. 
Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# The server supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# the server may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of the server is -# the same as "appendfsync no". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# The server is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: The server remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -# An AOF file may be found to be truncated at the end during the server -# startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where the server is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when the server itself -# crashes or aborts but the operating system still works correctly). 
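Illustrative aside: as the comments above recommend, AOF settings on an existing database are best changed through CONFIG on a live server rather than by editing the file and restarting. A minimal sketch assuming the redis-py client:

    import redis

    r = redis.Redis(host="localhost", port=6379, decode_responses=True)

    r.config_set("appendonly", "yes")        # start persisting to the AOF
    r.config_set("appendfsync", "everysec")  # the usual speed/durability compromise
    print(r.config_get("appendfsync"))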
-# -# The server can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the server starts emitting a log to inform the user of the event. -# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. When the option is set to no, the user requires -# to fix the AOF file using the "valkey-check-aof" utility before to restart -# the server. -# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# the server will try to read more data from the AOF file but not enough bytes -# will be found. -aof-load-truncated yes - -# The server can create append-only base files in either RDB or AOF formats. Using -# the RDB format is always faster and more efficient, and disabling it is only -# supported for backward compatibility purposes. -aof-use-rdb-preamble yes - -# The server supports recording timestamp annotations in the AOF to support restoring -# the data from a specific point-in-time. However, using this capability changes -# the AOF format in a way that may not be compatible with existing AOF parsers. -aof-timestamp-enabled no - -################################ SHUTDOWN ##################################### - -# Maximum time to wait for replicas when shutting down, in seconds. -# -# During shut down, a grace period allows any lagging replicas to catch up with -# the latest replication offset before the primary exits. This period can -# prevent data loss, especially for deployments without configured disk backups. -# -# The 'shutdown-timeout' value is the grace period's duration in seconds. It is -# only applicable when the instance has replicas. To disable the feature, set -# the value to 0. -# -# shutdown-timeout 10 - -# When the server receives a SIGINT or SIGTERM, shutdown is initiated and by default -# an RDB snapshot is written to disk in a blocking operation if save points are configured. -# The options used on signaled shutdown can include the following values: -# default: Saves RDB snapshot only if save points are configured. -# Waits for lagging replicas to catch up. -# save: Forces a DB saving operation even if no save points are configured. -# nosave: Prevents DB saving operation even if one or more save points are configured. -# now: Skips waiting for lagging replicas. -# force: Ignores any errors that would normally prevent the server from exiting. -# -# Any combination of values is allowed as long as "save" and "nosave" are not set simultaneously. -# Example: "nosave force now" -# -# shutdown-on-sigint default -# shutdown-on-sigterm default - -################ NON-DETERMINISTIC LONG BLOCKING COMMANDS ##################### - -# Maximum time in milliseconds for EVAL scripts, functions and in some cases -# modules' commands before the server can start processing or rejecting other clients. -# -# If the maximum execution time is reached the server will start to reply to most -# commands with a BUSY error. -# -# In this state the server will only allow a handful of commands to be executed. -# For instance, SCRIPT KILL, FUNCTION KILL, SHUTDOWN NOSAVE and possibly some -# module specific 'allow-busy' commands. 
-# -# SCRIPT KILL and FUNCTION KILL will only be able to stop a script that did not -# yet call any write commands, so SHUTDOWN NOSAVE may be the only way to stop -# the server in the case a write command was already issued by the script when -# the user doesn't want to wait for the natural termination of the script. -# -# The default is 5 seconds. It is possible to set it to 0 or a negative value -# to disable this mechanism (uninterrupted execution). Note that in the past -# this config had a different name, which is now an alias, so both of these do -# the same: -# lua-time-limit 5000 -# busy-reply-threshold 5000 - -################################ VALKEY CLUSTER ############################### - -# Normal server instances can't be part of a cluster; only nodes that are -# started as cluster nodes can. In order to start a server instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by each node. -# Every cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system do not have -# overlapping cluster configuration file names. -# -# cluster-config-file nodes-6379.conf - -# Cluster node timeout is the amount of milliseconds a node must be unreachable -# for it to be considered in failure state. -# Most other internal time limits are a multiple of the node timeout. -# -# cluster-node-timeout 15000 - -# The cluster port is the port that the cluster bus will listen for inbound connections on. When set -# to the default value, 0, it will be bound to the command port + 10000. Setting this value requires -# you to specify the cluster bus port when executing cluster meet. -# cluster-port 0 - -# A replica of a failing primary will avoid to start a failover if its data -# looks too old. -# -# There is no simple way for a replica to actually have an exact measure of -# its "data age", so the following two checks are performed: -# -# 1) If there are multiple replicas able to failover, they exchange messages -# in order to try to give an advantage to the replica with the best -# replication offset (more data from the primary processed). -# Replicas will try to get their rank by offset, and apply to the start -# of the failover a delay proportional to their rank. -# -# 2) Every single replica computes the time of the last interaction with -# its primary. This can be the last ping or command received (if the primary -# is still in the "connected" state), or the time that elapsed since the -# disconnection with the primary (if the replication link is currently down). -# If the last interaction is too old, the replica will not try to failover -# at all. -# -# The point "2" can be tuned by user. Specifically a replica will not perform -# the failover if, since the last interaction with the primary, the time -# elapsed is greater than: -# -# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period -# -# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor -# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the -# replica will not try to failover if it was not able to talk with the primary -# for longer than 310 seconds. 
-# -# A large cluster-replica-validity-factor may allow replicas with too old data to failover -# a primary, while a too small value may prevent the cluster from being able to -# elect a replica at all. -# -# For maximum availability, it is possible to set the cluster-replica-validity-factor -# to a value of 0, which means, that replicas will always try to failover the -# primary regardless of the last time they interacted with the primary. -# (However they'll always try to apply a delay proportional to their -# offset rank). -# -# Zero is the only value able to guarantee that when all the partitions heal -# the cluster will always be able to continue. -# -# cluster-replica-validity-factor 10 - -# Cluster replicas are able to migrate to orphaned primaries, that are primaries -# that are left without working replicas. This improves the cluster ability -# to resist to failures as otherwise an orphaned primary can't be failed over -# in case of failure if it has no working replicas. -# -# Replicas migrate to orphaned primaries only if there are still at least a -# given number of other working replicas for their old primary. This number -# is the "migration barrier". A migration barrier of 1 means that a replica -# will migrate only if there is at least 1 other working replica for its primary -# and so forth. It usually reflects the number of replicas you want for every -# primary in your cluster. -# -# Default is 1 (replicas migrate only if their primaries remain with at least -# one replica). To disable migration just set it to a very large value or -# set cluster-allow-replica-migration to 'no'. -# A value of 0 can be set but is useful only for debugging and dangerous -# in production. -# -# cluster-migration-barrier 1 - -# Turning off this option allows to use less automatic cluster configuration. -# It disables migration of replicas to orphaned primaries. Masters that become -# empty due to losing their last slots to another primary will not automatically -# replicate from the primary that took over their last slots. Instead, they will -# remain as empty primaries without any slots. -# -# Default is 'yes' (allow automatic migrations). -# -# cluster-allow-replica-migration yes - -# By default cluster nodes stop accepting queries if they detect there -# is at least a hash slot uncovered (no available node is serving it). -# This way if the cluster is partially down (for example a range of hash slots -# are no longer covered) all the cluster becomes, eventually, unavailable. -# It automatically returns available as soon as all the slots are covered again. -# -# However sometimes you want the subset of the cluster which is working, -# to continue to accept queries for the part of the key space that is still -# covered. In order to do so, just set the cluster-require-full-coverage -# option to no. -# -# cluster-require-full-coverage yes - -# This option, when set to yes, prevents replicas from trying to failover its -# primary during primary failures. However the replica can still perform a -# manual failover, if forced to do so. -# -# This is useful in different scenarios, especially in the case of multiple -# data center operations, where we want one side to never be promoted if not -# in the case of a total DC failure. -# -# cluster-replica-no-failover no - -# This option, when set to yes, allows nodes to serve read traffic while the -# cluster is in a down state, as long as it believes it owns the slots. -# -# This is useful for two cases. 
The first case is for when an application -# doesn't require consistency of data during node failures or network partitions. -# One example of this is a cache, where as long as the node has the data it -# should be able to serve it. -# -# The second use case is for configurations that don't meet the recommended -# three shards but want to enable cluster mode and scale later. A -# primary outage in a 1 or 2 shard configuration causes a read/write outage to the -# entire cluster without this option set, with it set there is only a write outage. -# Without a quorum of primaries, slot ownership will not change automatically. -# -# cluster-allow-reads-when-down no - -# This option, when set to yes, allows nodes to serve pubsub shard traffic while -# the cluster is in a down state, as long as it believes it owns the slots. -# -# This is useful if the application would like to use the pubsub feature even when -# the cluster global stable state is not OK. If the application wants to make sure only -# one shard is serving a given channel, this feature should be kept as yes. -# -# cluster-allow-pubsubshard-when-down yes - -# Cluster link send buffer limit is the limit on the memory usage of an individual -# cluster bus link's send buffer in bytes. Cluster links would be freed if they exceed -# this limit. This is to primarily prevent send buffers from growing unbounded on links -# toward slow peers (E.g. PubSub messages being piled up). -# This limit is disabled by default. Enable this limit when 'mem_cluster_links' INFO field -# and/or 'send-buffer-allocated' entries in the 'CLUSTER LINKS` command output continuously increase. -# Minimum limit of 1gb is recommended so that cluster link buffer can fit in at least a single -# PubSub message by default. (client-query-buffer-limit default value is 1gb) -# -# cluster-link-sendbuf-limit 0 - -# Clusters can configure their announced hostname using this config. This is a common use case for -# applications that need to use TLS Server Name Indication (SNI) or dealing with DNS based -# routing. By default this value is only shown as additional metadata in the CLUSTER SLOTS -# command, but can be changed using 'cluster-preferred-endpoint-type' config. This value is -# communicated along the clusterbus to all nodes, setting it to an empty string will remove -# the hostname and also propagate the removal. -# -# cluster-announce-hostname "" - -# Clusters can configure an optional nodename to be used in addition to the node ID for -# debugging and admin information. This name is broadcasted between nodes, so will be used -# in addition to the node ID when reporting cross node events such as node failures. -# cluster-announce-human-nodename "" - -# Clusters can advertise how clients should connect to them using either their IP address, -# a user defined hostname, or by declaring they have no endpoint. Which endpoint is -# shown as the preferred endpoint is set by using the cluster-preferred-endpoint-type -# config with values 'ip', 'hostname', or 'unknown-endpoint'. This value controls how -# the endpoint returned for MOVED/ASKING requests as well as the first field of CLUSTER SLOTS. -# If the preferred endpoint type is set to hostname, but no announced hostname is set, a '?' -# will be returned instead. -# -# When a cluster advertises itself as having an unknown endpoint, it's indicating that -# the server doesn't know how clients can reach the cluster. 
This can happen in certain -# networking situations where there are multiple possible routes to the node, and the -# server doesn't know which one the client took. In this case, the server is expecting -# the client to reach out on the same endpoint it used for making the last request, but use -# the port provided in the response. -# -# cluster-preferred-endpoint-type ip - -# The cluster blacklist is used when removing a node from the cluster completely. -# When CLUSTER FORGET is called for a node, that node is put into the blacklist for -# some time so that when gossip messages are received from other nodes that still -# remember it, it is not re-added. This gives time for CLUSTER FORGET to be sent to -# every node in the cluster. The blacklist TTL is 60 seconds by default, which should -# be sufficient for most clusters, but you may considering increasing this if you see -# nodes getting re-added while using CLUSTER FORGET. -# -# cluster-blacklist-ttl 60 - -# Clusters can be configured to track per-slot resource statistics, -# which are accessible by the CLUSTER SLOT-STATS command. -# -# By default, the 'cluster-slot-stats-enabled' is disabled, and only 'key-count' is captured. -# By enabling the 'cluster-slot-stats-enabled' config, the cluster will begin to capture advanced statistics. -# These statistics can be leveraged to assess general slot usage trends, identify hot / cold slots, -# migrate slots for a balanced cluster workload, and / or re-write application logic to better utilize slots. -# -# cluster-slot-stats-enabled no - -# In order to setup your cluster make sure to read the documentation -# available at https://valkey.io web site. - -########################## CLUSTER DOCKER/NAT support ######################## - -# In certain deployments, cluster node's address discovery fails, because -# addresses are NAT-ted or because ports are forwarded (the typical case is -# Docker and other containers). -# -# In order to make a cluster work in such environments, a static -# configuration where each node knows its public address is needed. The -# following options are used for this scope, and are: -# -# * cluster-announce-ip -# * cluster-announce-client-ipv4 -# * cluster-announce-client-ipv6 -# * cluster-announce-port -# * cluster-announce-tls-port -# * cluster-announce-bus-port -# -# Each instructs the node about its address, possibly other addresses to expose -# to clients, client ports (for connections without and with TLS) and cluster -# message bus port. The information is then published in the bus packets so that -# other nodes will be able to correctly map the address of the node publishing -# the information. -# -# If tls-cluster is set to yes and cluster-announce-tls-port is omitted or set -# to zero, then cluster-announce-port refers to the TLS port. Note also that -# cluster-announce-tls-port has no effect if tls-cluster is set to no. -# -# If cluster-announce-client-ipv4 and cluster-announce-client-ipv6 are omitted, -# then cluster-announce-ip is exposed to clients. -# -# If the above options are not used, the normal cluster auto-detection -# will be used instead. -# -# Note that when remapped, the bus port may not be at the fixed offset of -# clients port + 10000, so you can specify any port and bus-port depending -# on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usual. 
-# -# Example: -# -# cluster-announce-ip 10.1.1.5 -# cluster-announce-client-ipv4 123.123.123.5 -# cluster-announce-client-ipv6 2001:db8::8a2e:370:7334 -# cluster-announce-tls-port 6379 -# cluster-announce-port 0 -# cluster-announce-bus-port 6380 - -################################## SLOW LOG ################################### - -# The server Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells the server -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -################################ LATENCY MONITOR ############################## - -# The server latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a server instance. -# -# Via the LATENCY command this information is available to the user that can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. -# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enabled at runtime using the command -# "CONFIG SET latency-monitor-threshold " if needed. -latency-monitor-threshold 0 - -################################ LATENCY TRACKING ############################## - -# The server's extended latency monitoring tracks the per command latencies and enables -# exporting the percentile distribution via the INFO latencystats command, -# and cumulative latency distributions (histograms) via the LATENCY command. -# -# By default, the extended latency monitoring is enabled since the overhead -# of keeping track of the command latency is very small. -# latency-tracking yes - -# By default the exported latency percentiles via the INFO latencystats command -# are the p50, p99, and p999. -# latency-tracking-info-percentiles 50 99 99.9 - -############################# EVENT NOTIFICATION ############################## - -# The server can notify Pub/Sub clients about events happening in the key space. 
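Illustrative aside, recapping the slow-log directives above before the keyspace-notification details continue. A minimal sketch assuming the redis-py client; the 1 ms threshold is arbitrary.

    import redis

    r = redis.Redis(host="localhost", port=6379, decode_responses=True)

    r.config_set("slowlog-log-slower-than", 1000)  # log commands slower than 1 ms
    for entry in r.slowlog_get(10):                # most recent entries
        print(entry)
    r.slowlog_reset()                              # reclaim the memory used by the log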
-# This feature is documented at https://valkey.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that the server will notify among a set -# of classes. Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@__ prefix. -# E Keyevent events, published with __keyevent@__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# n New key events (Note: not included in the 'A' class) -# t Stream commands -# d Module key type events -# m Key-miss events (Note: It is not included in the 'A' class) -# A Alias for g$lshzxetd, so that the "AKE" string means all the events -# (Except key-miss events which are excluded from 'A' due to their -# unique nature). -# -# The "notify-keyspace-events" takes as argument a string that is composed -# of zero or multiple characters. The empty string means that notifications -# are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-listpack-entries 512 -hash-max-listpack-value 64 - -# Lists are also encoded in a special way to save a lot of space. -# The number of entries allowed per internal list node can be specified -# as a fixed maximum size or a maximum number of elements. -# For a fixed maximum size, use -5 through -1, meaning: -# -5: max size: 64 Kb <-- not recommended for normal workloads -# -4: max size: 32 Kb <-- not recommended -# -3: max size: 16 Kb <-- probably not recommended -# -2: max size: 8 Kb <-- good -# -1: max size: 4 Kb <-- good -# Positive numbers mean store up to _exactly_ that number of elements -# per list node. -# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), -# but if your use case is unique, adjust the settings as necessary. -list-max-listpack-size -2 - -# Lists may also be compressed. -# Compress depth is the number of quicklist ziplist nodes from *each* side of -# the list to *exclude* from compression. The head and tail of the list -# are always uncompressed for fast push/pop operations. 
Settings are: -# 0: disable all list compression -# 1: depth 1 means "don't start compressing until after 1 node into the list, -# going from either the head or tail" -# So: [head]->node->node->...->node->[tail] -# [head], [tail] will always be uncompressed; inner nodes will compress. -# 2: [head]->[next]->node->node->...->node->[prev]->[tail] -# 2 here means: don't compress head or head->next or tail->prev or tail, -# but compress all nodes between them. -# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] -# etc. -list-compress-depth 0 - -# Sets have a special encoding when a set is composed -# of just strings that happen to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries 512 - -# Sets containing non-integer values are also encoded using a memory efficient -# data structure when they have a small number of entries, and the biggest entry -# does not exceed a given threshold. These thresholds can be configured using -# the following directives. -set-max-listpack-entries 128 -set-max-listpack-value 64 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-listpack-entries 128 -zset-max-listpack-value 64 - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When a HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. -# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes 3000 - -# Streams macro node max size / items. The stream data structure is a radix -# tree of big nodes that encode multiple items inside. Using this configuration -# it is possible to configure how big a single node can be in bytes, and the -# maximum number of items it may contain before switching to a new node when -# appending new stream entries. If any of the following settings are set to -# zero, the limit is ignored, so for instance it is possible to set just a -# max entries limit by setting max-bytes to 0 and max-entries to the desired -# value. -stream-node-max-bytes 4096 -stream-node-max-entries 100 - -# Active rehashing uses 1% of the CPU time to help perform incremental rehashing -# of the main server hash tables, the ones mapping top-level keys to values. -# -# If active rehashing is disabled and rehashing is needed, a hash table is -# rehashed one "step" on every operation performed on the hash table (add, find, -# etc.), so if the server is idle, the rehashing may never complete and some -# more memory is used by the hash tables. Active rehashing helps prevent this. -# -# Active rehashing runs as a background task. Depending on the value of 'hz', -# the frequency at which the server performs background tasks, active rehashing -# can cause the server to freeze for a short time. 
For example, if 'hz' is set -# to 10, active rehashing runs for up to one millisecond every 100 milliseconds. -# If a freeze of one millisecond is not acceptable, you can increase 'hz' to let -# active rehashing run more often. If instead 'hz' is set to 100, active -# rehashing runs up to only 100 microseconds every 10 milliseconds. The total is -# still 1% of the time. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# replica -> replica clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and replica clients, since -# subscribers and replicas receive data in a push fashion. -# -# Note that it doesn't make sense to set the replica clients output buffer -# limit lower than the repl-backlog-size config (partial sync will succeed -# and then replica will get disconnected). -# Such a configuration is ignored (the size of repl-backlog-size will be used). -# This doesn't have memory consumption implications since the replica client -# will share the backlog buffers memory. -# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit replica 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Client query buffers accumulate new commands. They are limited to a fixed -# amount by default in order to avoid that a protocol desynchronization (for -# instance due to a bug in the client) will lead to unbound memory usage in -# the query buffer. However you can configure it here if you have very special -# needs, such as a command with huge argument, or huge multi/exec requests or alike. -# -# client-query-buffer-limit 1gb - -# In some scenarios client connections can hog up memory leading to OOM -# errors or data eviction. To avoid this we can cap the accumulated memory -# used by all client connections (all pubsub and normal clients). Once we -# reach that limit connections will be dropped by the server freeing up -# memory. The server will attempt to drop the connections using the most -# memory first. We call this mechanism "client eviction". 
-# -# Client eviction is configured using the maxmemory-clients setting as follows: -# 0 - client eviction is disabled (default) -# -# A memory value can be used for the client eviction threshold, -# for example: -# maxmemory-clients 1g -# -# A percentage value (between 1% and 100%) means the client eviction threshold -# is based on a percentage of the maxmemory setting. For example to set client -# eviction at 5% of maxmemory: -# maxmemory-clients 5% - -# In the server protocol, bulk requests, that are, elements representing single -# strings, are normally limited to 512 mb. However you can change this limit -# here, but must be 1mb or greater -# -# proto-max-bulk-len 512mb - -# The server calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but the server checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# the server is idle, but at the same time will make the server more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# Normally it is useful to have an HZ value which is proportional to the -# number of clients connected. This is useful in order, for instance, to -# avoid too many clients are processed for each background task invocation -# in order to avoid latency spikes. -# -# Since the default HZ value by default is conservatively set to 10, the server -# offers, and enables by default, the ability to use an adaptive HZ value -# which will temporarily raise when there are many connected clients. -# -# When dynamic HZ is enabled, the actual configured HZ will be used -# as a baseline, but multiples of the configured HZ value will be actually -# used as needed once more clients are connected. In this way an idle -# instance will use very little CPU time while a busy instance will be -# more responsive. -dynamic-hz yes - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 4 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync yes - -# When the server saves RDB file, if the following option is enabled -# the file will be fsync-ed every 4 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -rdb-save-incremental-fsync yes - -# The server's LFU eviction (see maxmemory setting) can be tuned. However it is a good -# idea to start with the default settings and only change them after investigating -# how to improve the performances and how the keys LFU change over time, which -# is possible to inspect via the OBJECT FREQ command. -# -# There are two tunable parameters in the server LFU implementation: the -# counter logarithm factor and the counter decay time. It is important to -# understand what the two parameters mean before changing them. 
-# -# The LFU counter is just 8 bits per key, it's maximum value is 255, so the server -# uses a probabilistic increment with logarithmic behavior. Given the value -# of the old counter, when a key is accessed, the counter is incremented in -# this way: -# -# 1. A random number R between 0 and 1 is extracted. -# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). -# 3. The counter is incremented only if R < P. -# -# The default lfu-log-factor is 10. This is a table of how the frequency -# counter changes with a different number of accesses with different -# logarithmic factors: -# -# +--------+------------+------------+------------+------------+------------+ -# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | -# +--------+------------+------------+------------+------------+------------+ -# | 0 | 104 | 255 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 1 | 18 | 49 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 10 | 10 | 18 | 142 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 100 | 8 | 11 | 49 | 143 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# -# NOTE: The above table was obtained by running the following commands: -# -# valkey-benchmark -n 1000000 incr foo -# valkey-cli object freq foo -# -# NOTE 2: The counter initial value is 5 in order to give new objects a chance -# to accumulate hits. -# -# The counter decay time is the time, in minutes, that must elapse in order -# for the key counter to be decremented. -# -# The default value for the lfu-decay-time is 1. A special value of 0 means we -# will never decay the counter. -# -# lfu-log-factor 10 -# lfu-decay-time 1 - - -# The maximum number of new client connections accepted per event-loop cycle. This configuration -# is set independently for TLS connections. -# -# By default, up to 10 new connection will be accepted per event-loop cycle for normal connections -# and up to 1 new connection per event-loop cycle for TLS connections. -# -# Adjusting this to a larger number can slightly improve efficiency for new connections -# at the risk of causing timeouts for regular commands on established connections. It is -# not advised to change this without ensuring that all clients have limited connection -# pools and exponential backoff in the case of command/connection timeouts. -# -# If your application is establishing a large number of new connections per second you should -# also consider tuning the value of tcp-backlog, which allows the kernel to buffer more -# pending connections before dropping or rejecting connections. -# -# max-new-connections-per-cycle 10 -# max-new-tls-connections-per-cycle 1 - - -########################### ACTIVE DEFRAGMENTATION ####################### -# -# What is active defragmentation? -# ------------------------------- -# -# Active (online) defragmentation allows a server to compact the -# spaces left between small allocations and deallocations of data in memory, -# thus allowing to reclaim back memory. -# -# Fragmentation is a natural process that happens with every allocator (but -# less so with Jemalloc, fortunately) and certain workloads. Normally a server -# restart is needed in order to lower the fragmentation, or at least to flush -# away all the data and create it again. 
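
As a worked illustration of the probabilistic LFU increment described in the removed config comments above (P = 1/(old_value*lfu_log_factor+1), counter capped at 255, new keys starting at 5), the following Python sketch simulates the counter; it is illustrative only and not part of this changeset.

import random


def lfu_increment(counter: int, lfu_log_factor: int = 10) -> int:
    """Simulate one key access against an 8-bit LFU counter."""
    if counter >= 255:
        # the counter saturates at its 8-bit maximum
        return counter
    r = random.random()                       # 1. random R between 0 and 1
    p = 1.0 / (counter * lfu_log_factor + 1)  # 2. P = 1/(old_value*lfu_log_factor+1)
    return counter + 1 if r < p else counter  # 3. increment only if R < P


counter = 5  # initial value 5 gives new objects a chance to accumulate hits
for _ in range(100_000):
    counter = lfu_increment(counter)
print(counter)  # grows only logarithmically with the number of accesses
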
However thanks to this feature -# implemented by Oran Agra, this process can happen at runtime -# in a "hot" way, while the server is running. -# -# Basically when the fragmentation is over a certain level (see the -# configuration options below) the server will start to create new copies of the -# values in contiguous memory regions by exploiting certain specific Jemalloc -# features (in order to understand if an allocation is causing fragmentation -# and to allocate it in a better place), and at the same time, will release the -# old copies of the data. This process, repeated incrementally for all the keys -# will cause the fragmentation to drop back to normal values. -# -# Important things to understand: -# -# 1. This feature is disabled by default, and only works if you compiled the server -# to use the copy of Jemalloc we ship with the source code of the server. -# This is the default with Linux builds. -# -# 2. You never need to enable this feature if you don't have fragmentation -# issues. -# -# 3. Once you experience fragmentation, you can enable this feature when -# needed with the command "CONFIG SET activedefrag yes". -# -# The configuration parameters are able to fine tune the behavior of the -# defragmentation process. If you are not sure about what they mean it is -# a good idea to leave the defaults untouched. - -# Active defragmentation is disabled by default -# activedefrag no - -# Minimum amount of fragmentation waste to start active defrag -# active-defrag-ignore-bytes 100mb - -# Minimum percentage of fragmentation to start active defrag -# active-defrag-threshold-lower 10 - -# Maximum percentage of fragmentation at which we use maximum effort -# active-defrag-threshold-upper 100 - -# Minimal effort for defrag in CPU percentage, to be used when the lower -# threshold is reached -# active-defrag-cycle-min 1 - -# Maximal effort for defrag in CPU percentage, to be used when the upper -# threshold is reached -# active-defrag-cycle-max 25 - -# Maximum number of set/hash/zset/list fields that will be processed from -# the main dictionary scan -# active-defrag-max-scan-fields 1000 - -# Jemalloc background thread for purging will be enabled by default -jemalloc-bg-thread yes - -# It is possible to pin different threads and processes of the server to specific -# CPUs in your system, in order to maximize the performances of the server. -# This is useful both in order to pin different server threads in different -# CPUs, but also in order to make sure that multiple server instances running -# in the same host will be pinned to different CPUs. -# -# Normally you can do this using the "taskset" command, however it is also -# possible to do this via the server configuration directly, both in Linux and FreeBSD. -# -# You can pin the server/IO threads, bio threads, aof rewrite child process, and -# the bgsave child process. 
The syntax to specify the cpu list is the same as -# the taskset command: -# -# Set server/io threads to cpu affinity 0,2,4,6: -# server-cpulist 0-7:2 -# -# Set bio threads to cpu affinity 1,3: -# bio-cpulist 1,3 -# -# Set aof rewrite child process to cpu affinity 8,9,10,11: -# aof-rewrite-cpulist 8-11 -# -# Set bgsave child process to cpu affinity 1,10,11 -# bgsave-cpulist 1,10-11 - -# In some cases the server will emit warnings and even refuse to start if it detects -# that the system is in bad state, it is possible to suppress these warnings -# by setting the following config which takes a space delimited list of warnings -# to suppress -# -# ignore-warnings ARM64-COW-BUG - -# Inform Valkey of the availability zone if running in a cloud environment. Currently -# this is only exposed via the info command for clients to use, but in the future we -# we may also use this when making decisions for replication. -# -# availability-zone "zone-name" diff --git a/pyproject.toml b/pyproject.toml index 871ef83..887be32 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,8 +27,13 @@ keywords = [ "example", ] dependencies = [ - "colorlog>=4.2.1", - "xmltodict", + "Flask", + "Flask-Cors>=3.0.3", + "flask-restful-swagger-2>=0.35", + "colorlog>=3.1.4", + "python-json-logger", + "requests>=2.20.0", + "cloudevents", ] [project.optional-dependencies] diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 3ce41f4..0000000 --- a/requirements.txt +++ /dev/null @@ -1,17 +0,0 @@ -# ============================================================================= -# DEPRECATION WARNING: -# -# The file `requirements.txt` does not influence the package dependencies and -# will not be automatically created in the next version of PyScaffold (v4.x). -# -# Please have look at the docs for better alternatives -# (`Dependency Management` section). -# ============================================================================= -# -# Add your pinned requirements so that they can be easily installed with: -# pip install -r requirements.txt -# Remember to also add them in setup.cfg but unpinned. 
-# Example: -# numpy==1.13.3 -# scipy==1.0 -# diff --git a/ruff.toml b/ruff.toml index f43f568..e6ead58 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,15 +1,26 @@ +# A005 Module is shadowing a Python builtin module +# ANN001 Missing type annotation for function argument +# ANN201 Missing return type annotation for public function +# ANN202 Missing return type annotation for private function +# E501 Line too long (handled already with other linters) +# FA102 future-required-type-annotation +# N802 Function name should be lowercase +# N816 Variable in global scope should not be mixedCas +# PLC0415 import-outside-top-level # PLR0913 too-many-arguments (of function definition) # PLR0917 too-many-positional-arguments (of function definition) +# PLR2004 magic-value-comparison (unnamed numerical constants ("magic") values) +# PLR6301 Method could be a function, class method, or static method # PLW0603 (checks for use of) global-statement # S107 hardcoded-password-default +# S113 Probable use of `requests` call without timeout # S606 start-process-with-no-shell -# FA102 future-required-type-annotation -# PLC0415 import-outside-top-level -# PLR2004 magic-value-comparison (unnamed numerical constants ("magic") values) +# SIM102 Use a single `if` statement instead of nested `if` statement -lint.ignore = ["PLR0913", "PLR0917", "PLW0603", "S107", "S606", "FA102"] +lint.ignore = ["ANN001", "ANN201", "ANN202", "E501", "FA102", "N802", "N816", "PLR0913", "PLR0917", "PLR6301", "PLW0603", "S107", "S113", "S606"] [lint.per-file-ignores] "tests/testsuite.py" = [ "PLC0415",] -"tests/integrationtests/test_helloworld.py" = [ "PLR2004",] -"tests/integrationtests/test_projecthelloworld.py" = [ "PLR2004",] +"src/actinia_cloudevent_plugin/resources/logging.py" = ["A005",] +"src/actinia_cloudevent_plugin/resources/config.py" = ["SIM102",] +"tests/integrationtests/test_cloudevent.py" = ["PLR2004",] diff --git a/src/actinia_cloudevent_plugin/api/__init__.py b/src/actinia_cloudevent_plugin/api/__init__.py index a59ed06..bc30294 100644 --- a/src/actinia_cloudevent_plugin/api/__init__.py +++ b/src/actinia_cloudevent_plugin/api/__init__.py @@ -1,4 +1,4 @@ -"""actinia-example-plguin API part of package. +"""actinia-cloudevent-plugin API part of package. This part provides the API part of the actinia-cloudevent-plugin. """ diff --git a/src/actinia_cloudevent_plugin/api/cloudevent.py b/src/actinia_cloudevent_plugin/api/cloudevent.py new file mode 100644 index 0000000..cfcf4cf --- /dev/null +++ b/src/actinia_cloudevent_plugin/api/cloudevent.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python +"""Copyright (c) 2025 mundialis GmbH & Co. KG. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +Hello World class +""" + +__license__ = "GPLv3" +__author__ = "Lina Krisztian" +__copyright__ = "Copyright 2025 mundialis GmbH & Co. KG" +__maintainer__ = "mundialis GmbH & Co. 
KG" + +from flask import jsonify, make_response +from flask_restful_swagger_2 import Resource, swagger +from requests.exceptions import ConnectionError # noqa: A004 + +from actinia_cloudevent_plugin.apidocs import cloudevent +from actinia_cloudevent_plugin.core.processing import ( + cloud_event_to_process_chain, + receive_cloud_event, + send_binary_cloud_event, + # send_structured_cloud_event, +) +from actinia_cloudevent_plugin.model.response_models import ( + SimpleStatusCodeResponseModel, +) +from actinia_cloudevent_plugin.resources.config import EVENTRECEIVER + + +class Cloudevent(Resource): + """Cloudevent handling.""" + + def __init__(self) -> None: + """Cloudevent class initialisation.""" + self.msg = ( + "Received event and returned event " + " with actinia-job ." + ) + + def get(self): + """Cloudevent get method: not allowed response.""" + res = jsonify( + SimpleStatusCodeResponseModel( + status=405, + message="Method Not Allowed", + ), + ) + return make_response(res, 405) + + @swagger.doc(cloudevent.describe_cloudevent_post_docs) + def post(self) -> SimpleStatusCodeResponseModel: + """Cloudevent post method with cloudevent from postbody. + + Receives cloudevent, transforms to process chain (pc), + sends pc to actinia + start process, + and returns cloudevent with queue name. + """ + # Transform postbody to cloudevent + event_received = receive_cloud_event() + # With received process chain start actinia process + return cloudevent + actinia_job = cloud_event_to_process_chain(event_received) + # URL to which the generated cloudevent is sent + url = EVENTRECEIVER.url + # TODO: binary or structured cloud event? + # From https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#message + # A "structured-mode message" is one where the entire event (attributes and data) + # are encoded in the message body, according to a specific event format. + # A "binary-mode message" is one where the event data is stored in the message body, + # and event attributes are stored as part of message metadata. + # Often, binary mode is used when the producer of the CloudEvent wishes to add the + # CloudEvent's metadata to an existing event without impacting the message's body. + # In most cases a CloudEvent encoded as a binary-mode message will not break an + # existing receiver's processing of the event because the message's metadata + # typically allows for extension attributes. + # In other words, a binary formatted CloudEvent would work for both + # a CloudEvents enabled receiver as well as one that is unaware of CloudEvents. + try: + event_returned = send_binary_cloud_event( + event_received, + actinia_job, + url, + ) + return SimpleStatusCodeResponseModel( + status=204, + message=self.msg.replace("", event_received["id"]) + .replace("", event_returned["id"]) + .replace("", actinia_job), + ) + except ConnectionError as e: + return f"Connection ERROR when returning cloudevent: {e}" + except Exception() as e: + return f"ERROR when returning cloudevent: {e}" diff --git a/src/actinia_cloudevent_plugin/api/helloworld.py b/src/actinia_cloudevent_plugin/api/helloworld.py deleted file mode 100644 index a9fe877..0000000 --- a/src/actinia_cloudevent_plugin/api/helloworld.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python -"""Copyright (c) 2018-2024 mundialis GmbH & Co. KG. 
- -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -Hello World class -""" - -__license__ = "GPLv3" -__author__ = "Anika Weinmann" -__copyright__ = "Copyright 2022 mundialis GmbH & Co. KG" -__maintainer__ = "mundialis GmbH & Co. KG" - - -from flask import make_response, request -from flask_restful_swagger_2 import Resource, swagger - -from actinia_cloudevent_plugin.apidocs import helloworld -from actinia_cloudevent_plugin.core.example import transform_input -from actinia_cloudevent_plugin.model.response_models import ( - SimpleStatusCodeResponseModel, -) - - -class HelloWorld(Resource): - """Returns 'Hello world!'.""" - - def __init__(self) -> None: - """Hello world class initialisation.""" - self.msg = "Hello world!" - - @swagger.doc(helloworld.describe_hello_world_get_docs) - def get(self) -> SimpleStatusCodeResponseModel: - """Get 'Hello world!' as answer string.""" - return SimpleStatusCodeResponseModel(status=200, message=self.msg) - - @swagger.doc(helloworld.describe_hello_world_post_docs) - def post(self) -> SimpleStatusCodeResponseModel: - """Hello World post method with name from postbody.""" - req_data = request.get_json(force=True) - if isinstance(req_data, dict) is False or "name" not in req_data: - return make_response("Missing name in JSON content", 400) - name = req_data["name"] - msg = f"{self.msg} {transform_input(name)}" - - return SimpleStatusCodeResponseModel(status=200, message=msg) diff --git a/src/actinia_cloudevent_plugin/api/project_helloworld.py b/src/actinia_cloudevent_plugin/api/project_helloworld.py deleted file mode 100644 index be0dfef..0000000 --- a/src/actinia_cloudevent_plugin/api/project_helloworld.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python -"""Copyright (c) 2018-2024 mundialis GmbH & Co. KG. - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -Hello World class -""" - -__license__ = "GPLv3" -__author__ = "Anika Weinmann" -__copyright__ = "Copyright 2024 mundialis GmbH & Co. KG" -__maintainer__ = "mundialis GmbH & Co. 
KG" - - -from typing import ClassVar - -from actinia_core.models.response_models import SimpleResponseModel -from actinia_core.rest.base.deprecated_locations import ( - location_deprecated_decorator, -) -from flask import jsonify, make_response, request -from flask.wrappers import Response -from flask_restful_swagger_2 import Resource, swagger - -from actinia_cloudevent_plugin.apidocs import project_helloworld -from actinia_cloudevent_plugin.core.example import transform_input - - -class ProjectHelloWorld(Resource): - """Returns 'Hello world with project/location!'.""" - - decorators: ClassVar[list] = [] - - # Add decorators for deprecated GRASS GIS locations - decorators.append(location_deprecated_decorator) - - def __init__(self) -> None: - """Project hello world class initialisation.""" - self.msg = "Project: Hello world!" - - @swagger.doc(project_helloworld.describe_project_hello_world_get_docs) - def get(self, project_name: str) -> Response: - """Get 'Hello world!' as answer string.""" - msg = f"{self.msg} {project_name}" - return make_response( - jsonify( - SimpleResponseModel( - status="200", - message=msg, - ), - ), - 200, - ) - - @swagger.doc(project_helloworld.describe_project_hello_world_post_docs) - def post(self, project_name: str) -> Response: - """Hello World post method with name from postbody.""" - req_data = request.get_json(force=True) - if isinstance(req_data, dict) is False or "name" not in req_data: - return make_response("Missing name in JSON content", 400) - name = req_data["name"] - msg = f"{self.msg} {transform_input(name)} {project_name}" - - return make_response( - jsonify( - SimpleResponseModel( - status="200", - message=msg, - ), - ), - 200, - ) diff --git a/src/actinia_cloudevent_plugin/apidocs/__init__.py b/src/actinia_cloudevent_plugin/apidocs/__init__.py index 613195c..f35e21b 100644 --- a/src/actinia_cloudevent_plugin/apidocs/__init__.py +++ b/src/actinia_cloudevent_plugin/apidocs/__init__.py @@ -1,4 +1,4 @@ -"""actinia-example-plguin API DOCs part of package. +"""actinia-cloudevent-plugin API DOCs part of package. This part provides the API DOCs part of the actinia-cloudevent-plugin. """ diff --git a/src/actinia_cloudevent_plugin/apidocs/helloworld.py b/src/actinia_cloudevent_plugin/apidocs/cloudevent.py similarity index 50% rename from src/actinia_cloudevent_plugin/apidocs/helloworld.py rename to src/actinia_cloudevent_plugin/apidocs/cloudevent.py index 46734fd..627f003 100644 --- a/src/actinia_cloudevent_plugin/apidocs/helloworld.py +++ b/src/actinia_cloudevent_plugin/apidocs/cloudevent.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -"""Copyright (c) 2018-2024 mundialis GmbH & Co. KG. +"""Copyright (c) 2025 mundialis GmbH & Co. KG. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -18,8 +18,8 @@ """ __license__ = "GPLv3" -__author__ = "Anika Weinmann" -__copyright__ = "Copyright 2022 mundialis GmbH & Co. KG" +__author__ = "Lina Krisztian" +__copyright__ = "Copyright 2025 mundialis GmbH & Co. KG" __maintainer__ = "mundialis GmbH & Co. KG" @@ -27,40 +27,23 @@ SimpleStatusCodeResponseModel, ) -describe_hello_world_get_docs = { +describe_cloudevent_post_docs = { # "summary" is taken from the description of the get method - "tags": ["example"], - "description": "Hello World example", + "tags": ["cloudevent"], + "description": ( + "Receives cloudevent, transforms and starts pc and returns cloudevent." 
+ ), "responses": { "200": { - "description": "This response returns the string 'Hello World!'", - "schema": SimpleStatusCodeResponseModel, - }, - }, -} - -describe_hello_world_post_docs = { - # "summary" is taken from the description of the get method - "tags": ["example"], - "description": "Hello World example with name", - "responses": { - "200": { - "description": "This response returns the string 'Hello World " - "NAME!'", + "description": ( + "This response returns received, and returned events, " + "generated queue name and the status" + ), "schema": SimpleStatusCodeResponseModel, }, "400": { - "description": "This response returns a detail error message", - "schema": { - "type": "object", - "properties": { - "message": { - "type": "string", - "description": "detailed message", - "example": "Missing name in JSON content", - }, - }, - }, + "description": "This response returns an error message", + "schema": SimpleStatusCodeResponseModel, }, }, } diff --git a/src/actinia_cloudevent_plugin/apidocs/project_helloworld.py b/src/actinia_cloudevent_plugin/apidocs/project_helloworld.py deleted file mode 100644 index b9c36d2..0000000 --- a/src/actinia_cloudevent_plugin/apidocs/project_helloworld.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python -"""Copyright (c) 2018-2024 mundialis GmbH & Co. KG. - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -Hello World class -""" - -__license__ = "GPLv3" -__author__ = "Anika Weinmann" -__copyright__ = "Copyright 2024 mundialis GmbH & Co. KG" -__maintainer__ = "mundialis GmbH & Co. 
KG" - - -from actinia_cloudevent_plugin.model.response_models import ( - SimpleStatusCodeResponseModel, -) - -describe_project_hello_world_get_docs = { - # "summary" is taken from the description of the get method - "tags": ["example"], - "description": "Project Hello World example", - "parameters": [ - { - "name": "project_name", - "description": "The project name that contains the data that " - "should be processed", - "required": True, - "in": "path", - "type": "string", - "default": "nc_spm_08", - }, - ], - "responses": { - "200": { - "description": "This response returns the string 'Hello World!'", - "schema": SimpleStatusCodeResponseModel, - }, - }, -} - -describe_project_hello_world_post_docs = { - # "summary" is taken from the description of the get method - "tags": ["example"], - "description": "Project Hello World example with name", - "parameters": [ - { - "name": "project_name", - "description": "The project name that contains the data that " - "should be processed", - "required": True, - "in": "path", - "type": "string", - "default": "nc_spm_08", - }, - ], - "responses": { - "200": { - "description": "This response returns the string 'Hello World " - "NAME!'", - "schema": SimpleStatusCodeResponseModel, - }, - "400": { - "description": "This response returns a detail error message", - "schema": { - "type": "object", - "properties": { - "message": { - "type": "string", - "description": "detailed message", - "example": "Missing name in JSON content", - }, - }, - }, - }, - }, -} diff --git a/src/actinia_cloudevent_plugin/core/__init__.py b/src/actinia_cloudevent_plugin/core/__init__.py index 51ca76e..2f057b7 100644 --- a/src/actinia_cloudevent_plugin/core/__init__.py +++ b/src/actinia_cloudevent_plugin/core/__init__.py @@ -1,4 +1,4 @@ -"""actinia-example-plguin core part of package. +"""actinia-cloudevent-plugin core part of package. This part provides the core part of the actinia-cloudevent-plugin. """ diff --git a/src/actinia_cloudevent_plugin/core/example.py b/src/actinia_cloudevent_plugin/core/example.py deleted file mode 100644 index 0c50b4d..0000000 --- a/src/actinia_cloudevent_plugin/core/example.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python -"""Copyright (c) 2018-2024 mundialis GmbH & Co. KG. - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -Example core functionality -""" - -__license__ = "GPLv3" -__author__ = "Anika Weinmann" -__copyright__ = "Copyright 2022 mundialis GmbH & Co. KG" -__maintainer__ = "mundialis GmbH & Co. KG" - - -def transform_input(inp: str) -> str: - """Return a transformed string as example core function. - - Args: - inp (str): Input string to transform - - Returns: - (str) transformed string - - """ - return f"Hello world {inp.upper()}!" 
diff --git a/src/actinia_cloudevent_plugin/core/processing.py b/src/actinia_cloudevent_plugin/core/processing.py new file mode 100644 index 0000000..3a201fe --- /dev/null +++ b/src/actinia_cloudevent_plugin/core/processing.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python +"""Copyright (c) 2025 mundialis GmbH & Co. KG. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +Example core functionality +""" + +__license__ = "GPLv3" +__author__ = "Lina Krisztian" +__copyright__ = "Copyright 2025 mundialis GmbH & Co. KG" +__maintainer__ = "mundialis GmbH & Co. KG" + + +import requests +from cloudevents.conversion import to_binary, to_structured +from cloudevents.http import CloudEvent, from_http +from flask import request + + +def receive_cloud_event(): + """Return cloudevent from postpody.""" + # Parses CloudEvent 'data' and 'headers' into a CloudEvent. + event = from_http(request.headers, request.get_data()) + + # ? TODO + # eventually Filter the event (see example below) + event_type = event["type"] + if event_type == "com.example.object.created": + print("Object created event received!") + + return event + + +def cloud_event_to_process_chain(event) -> str: + """Return queue name for process chain of event.""" + # (Remove ruff-exception, when pc variable used) + pc = event.get_data()["list"][0] # noqa: F841 + # !! TODO !!: pc to job + # NOTE: as standalone app -> consider for queue name creation + # HTTP POST pc to actinia-module plugin processing endpoint + # # # include an identifier for grouping cloudevents of same actinia process (?) + # # # (e.g. new metadata field "queue_name", or within data, or use existign id) + # -> actinia core returns resource-url, including resource_id (and queue name) + # (queuename = xx_; if configured accordingly within actinia -> each job own queue) + # via knative jobsink: start actinia worker (with queue name) + # (https://knative.dev/docs/eventing/sinks/job-sink/#usage) + # e.g. 
HTTP POST with queue name + # kubectl run curl --image=curlimages/curl --rm=true --restart=Never -ti -- -X POST -v \ + # -H "content-type: application/json" \ + # -H "ce-specversion: 1.0" \ + # -H "ce-source: my/curl/command" \ + # -H "ce-type: my.demo.event" \ + # -H "ce-id: 123" \ + # -d '{"details":"queuename"}' \ + # http://job-sink.knative-eventing.svc.cluster.local/default/job-sink-logger + return "_" # queue name and resource id + + +def send_binary_cloud_event(event, actinia_job, url): + """Return posted binary event with actinia_job.""" + attributes = { + "specversion": event["specversion"], + "source": "/actinia-cloudevent-plugin", + "type": "com.mundialis.actinia.process.started", + "subject": event["subject"], + "datacontenttype": "application/json", + } + data = {"actinia_job": actinia_job} + + event = CloudEvent(attributes, data) + headers, body = to_binary(event) + # send event + requests.post(url, headers=headers, data=body) + + return event + + +def send_structured_cloud_event(event, actinia_job, url): + """Return posted structured event with actinia_job.""" + attributes = { + "specversion": event["specversion"], + "source": "/actinia-cloudevent-plugin", + "type": "com.mundialis.actinia.process.started", + "subject": event["subject"], + "datacontenttype": "application/json", + } + data = {"actinia_job": actinia_job} + + event = CloudEvent(attributes, data) + headers, body = to_structured(event) + # send event + requests.post(url, headers=headers, data=body) + + return event diff --git a/src/actinia_cloudevent_plugin/endpoints.py b/src/actinia_cloudevent_plugin/endpoints.py index 5b28f4f..8292420 100644 --- a/src/actinia_cloudevent_plugin/endpoints.py +++ b/src/actinia_cloudevent_plugin/endpoints.py @@ -18,46 +18,46 @@ """ __license__ = "GPLv3" -__author__ = "Carmen Tawalika, Anika Weinmann" +__author__ = "Carmen Tawalika, Anika Weinmann, Lina Krisztian" __copyright__ = "Copyright 2022-2024 mundialis GmbH & Co. KG" __maintainer__ = "mundialis GmbH & Co. KG" -from actinia_core.endpoints import get_endpoint_class_name -from flask_restful_swagger_2 import Api - -from actinia_cloudevent_plugin.api.helloworld import HelloWorld -from actinia_cloudevent_plugin.api.project_helloworld import ProjectHelloWorld - -def create_project_endpoints( - apidoc: Api, - projects_url_part: str = "projects", -) -> None: - """Add resources with "project" inside the endpoint url to the api. 
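
To clarify the binary-versus-structured question raised in the TODO above, the following sketch shows what the send_binary_cloud_event and send_structured_cloud_event helpers from core/processing.py actually put on the wire; the attribute and data values are made-up examples.

from cloudevents.conversion import to_binary, to_structured
from cloudevents.http import CloudEvent

event = CloudEvent(
    {
        "source": "/actinia-cloudevent-plugin",
        "type": "com.mundialis.actinia.process.started",
        "datacontenttype": "application/json",
    },
    {"actinia_job": "example-queue_example-resource-id"},  # made-up job identifier
)

# binary mode: attributes travel as ce-* HTTP headers, the body carries only the data
bin_headers, bin_body = to_binary(event)
print(sorted(k for k in bin_headers if k.startswith("ce-")))

# structured mode: the complete event (attributes and data) is JSON-encoded in the body
struct_headers, struct_body = to_structured(event)
print(struct_headers["content-type"], struct_body)
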
+import sys - Args: - apidoc (Api): Flask api - projects_url_part (str): The name of the projects inside the endpoint - URL; to add deprecated location endpoints set - it to "locations" +import werkzeug +from flask import current_app, send_from_directory +from flask_restful_swagger_2 import Api - """ - apidoc.add_resource( - ProjectHelloWorld, - f"/helloworld/{projects_url_part}/", - endpoint=get_endpoint_class_name(ProjectHelloWorld, projects_url_part), - ) +from actinia_cloudevent_plugin.api.cloudevent import Cloudevent +from actinia_cloudevent_plugin.resources.logging import log # endpoints loaded if run as actinia-core plugin as well as standalone app def create_endpoints(flask_api: Api) -> None: """Create plugin endpoints.""" + app = flask_api.app apidoc = flask_api - apidoc.add_resource(HelloWorld, "/helloworld") - - # add deprecated location endpoints - create_project_endpoints(apidoc, projects_url_part="locations") - - # add project endpoints - create_project_endpoints(apidoc, projects_url_part="projects") + package = sys._getframe().f_back.f_globals["__package__"] # noqa: SLF001 + if package != "actinia_core": + + @app.route("/") + def index(): + try: + return current_app.send_static_file("index.html") + except werkzeug.exceptions.NotFound: + log.debug("No index.html found. Serving backup.") + # when actinia-cloudevent-plugin is installed in single mode, + # the swagger endpoint would be "latest/api/swagger.json". + # As api docs exist in single mode, + # use this fallback for plugin mode. + return """

+            <h1>actinia-metadata-plugin</h1>
+            <a href="latest/api/swagger.json">

+ API docs""" + + @app.route("/") + def static_content(filename): + # WARNING: all content from folder "static" will be accessible! + return send_from_directory(app.static_folder, filename) + + apidoc.add_resource(Cloudevent, "/") diff --git a/src/actinia_cloudevent_plugin/main.py b/src/actinia_cloudevent_plugin/main.py new file mode 100644 index 0000000..031ae70 --- /dev/null +++ b/src/actinia_cloudevent_plugin/main.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +"""Copyright (c) 2025 mundialis GmbH & Co. KG. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +Application entrypoint. Creates Flask app and swagger docs, adds endpoints +""" + +__author__ = "Carmen Tawalika, Lina Krisztian" +__copyright__ = "2025-present mundialis GmbH & Co. KG" +__license__ = "Apache-2.0" + + +from flask import Flask +from flask_cors import CORS +from flask_restful_swagger_2 import Api + +from actinia_cloudevent_plugin.endpoints import create_endpoints +from actinia_cloudevent_plugin.resources.logging import log + +flask_app = Flask(__name__) +# allows endpoints with and without trailing slashes +flask_app.url_map.strict_slashes = False +CORS(flask_app) + + +API_VERSION = "v1" + +URL_PREFIX = f"/api/{API_VERSION}" + +apidoc = Api( + flask_app, + title="actinia-cloudevent-plugin", + prefix=URL_PREFIX, + api_version=API_VERSION, + api_spec_url=f"{URL_PREFIX}/swagger", + schemes=["https", "http"], + consumes=["application/json"], + description="""Receives cloudevent, + transforms it to an actinia process chain + and returns cloudevent back. + """, +) + +create_endpoints(apidoc) + + +if __name__ == "__main__": + # call this for development only with: + # `python3 -m actinia_cloudevent_plugin.main` + log.debug("starting app in development mode...") + # ruff: S201 :Use of `debug=True` in Flask app detected + flask_app.run(debug=True, use_reloader=False) # noqa: S201 + # for production environent use application in wsgi.py diff --git a/src/actinia_cloudevent_plugin/model/__init__.py b/src/actinia_cloudevent_plugin/model/__init__.py index 53a1d4d..376c7d1 100644 --- a/src/actinia_cloudevent_plugin/model/__init__.py +++ b/src/actinia_cloudevent_plugin/model/__init__.py @@ -1,4 +1,4 @@ -"""actinia-example-plguin model part of package. +"""actinia-cloudevent-plugin model part of package. This part provides the model part of the actinia-cloudevent-plugin. """ diff --git a/src/actinia_cloudevent_plugin/resources/__init__.py b/src/actinia_cloudevent_plugin/resources/__init__.py new file mode 100644 index 0000000..a90abc6 --- /dev/null +++ b/src/actinia_cloudevent_plugin/resources/__init__.py @@ -0,0 +1,4 @@ +"""actinia-cloudevent-plugin resources part of package. + +This part provides the Resources part of the actinia-cloudevent-plugin. 
+""" diff --git a/src/actinia_cloudevent_plugin/resources/config.py b/src/actinia_cloudevent_plugin/resources/config.py new file mode 100644 index 0000000..8ac0be7 --- /dev/null +++ b/src/actinia_cloudevent_plugin/resources/config.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python +"""Copyright (c) 2018-2025 mundialis GmbH & Co. KG. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +Configuration file +""" + +__author__ = "Carmen Tawalika, Lina Krisztian" +__copyright__ = "2018-2025 mundialis GmbH & Co. KG" +__license__ = "Apache-2.0" + + +import configparser +from pathlib import Path + +# config can be overwritten by mounting *.ini files into folders inside +# the config folder. +DEFAULT_CONFIG_PATH = "config" +CONFIG_FILES = [ + str(f) for f in Path(DEFAULT_CONFIG_PATH).glob("**/*.ini") if f.is_file() +] +GENERATED_CONFIG = DEFAULT_CONFIG_PATH + "/actinia-cloudevent-plugin.cfg" + + +class EVENTRECEIVER: + """Default config for cloudevent receiver.""" + + url = "http://localhost:3000/" + + +class LOGCONFIG: + """Default config for logging.""" + + logfile = "actinia-cloudevent-plugin.log" + level = "INFO" + type = "stdout" + + +class Configfile: + """Configuration file.""" + + def __init__(self) -> None: + """Overwrite config classes. + + Will overwrite the config classes above when config files + named DEFAULT_CONFIG_PATH/**/*.ini exist. + On first import of the module it is initialized. + """ + config = configparser.ConfigParser() + config.read(CONFIG_FILES) + if len(config) <= 1: + print("Could not find any config file, using default values.") + return + print("Loading config files: " + str(CONFIG_FILES) + " ...") + + with open( # noqa: PTH123 + GENERATED_CONFIG, + "w", + encoding="utf-8", + ) as configfile: + config.write(configfile) + print("Configuration written to " + GENERATED_CONFIG) + + # LOGGING + if config.has_section("LOGCONFIG"): + if config.has_option("LOGCONFIG", "logfile"): + LOGCONFIG.logfile = config.get("LOGCONFIG", "logfile") + if config.has_option("LOGCONFIG", "level"): + LOGCONFIG.level = config.get("LOGCONFIG", "level") + if config.has_option("LOGCONFIG", "type"): + LOGCONFIG.type = config.get("LOGCONFIG", "type") + + # EVENTRECEIVER + if config.has_section("EVENTRECEIVER"): + if config.has_option("EVENTRECEIVER", "url"): + EVENTRECEIVER.url = config.get("EVENTRECEIVER", "url") + + +init = Configfile() diff --git a/src/actinia_cloudevent_plugin/resources/logging.py b/src/actinia_cloudevent_plugin/resources/logging.py new file mode 100644 index 0000000..7d4b246 --- /dev/null +++ b/src/actinia_cloudevent_plugin/resources/logging.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python +"""Copyright (c) 2025 mundialis GmbH & Co. KG. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +Logging interface +""" + +__license__ = "Apache-2.0" +__author__ = "Carmen Tawalika" +__copyright__ = "Copyright 2025, mundialis" +__maintainer__ = "" + + +import logging +from datetime import datetime, timezone +from logging import FileHandler + +from colorlog import ColoredFormatter +from pythonjsonlogger import jsonlogger + +from actinia_cloudevent_plugin.resources.config import LOGCONFIG + +# Notice: do not call logging.warning (will create new logger for ever) +# logging.warning("called actinia_cloudevent_plugin logger after 1") + +log = logging.getLogger("actinia-cloudevent-plugin") +werkzeugLog = logging.getLogger("werkzeug") +gunicornLog = logging.getLogger("gunicorn") + + +def set_log_format(veto=None): + """Set format of logs.""" + logformat = "" + if LOGCONFIG.type == "json" and not veto: + logformat = CustomJsonFormatter( + "%(time) %(level) %(component)" + "%(module) %(message) %(pathname)" + "%(lineno) %(processName)" + "%(threadName)", + ) + else: + logformat = ColoredFormatter( + "%(log_color)s[%(asctime)s] %(levelname)-10s: %(name)s.%(module)-" + "10s -%(message)s [in %(pathname)s:%(lineno)d]%(reset)s", + ) + return logformat + + +def set_log_handler(logger, logtype, logformat) -> None: + """Set handling of logs.""" + if logtype == "stdout": + handler = logging.StreamHandler() + elif logtype == "file": + # For readability, json is never written to file + handler = FileHandler(LOGCONFIG.logfile) + handler.setFormatter(logformat) + logger.addHandler(handler) + + +class CustomJsonFormatter(jsonlogger.JsonFormatter): + """Customized formatting of logs as json.""" + + def add_fields(self, log_record, record, message_dict) -> None: + """Add fiels for json log.""" + super(CustomJsonFormatter, self).add_fields( # noqa: UP008 + log_record, + record, + message_dict, + ) + + # (Pdb) dir(record) + # ... 
'args', 'created', 'exc_info', 'exc_text', 'filename', 'funcName' + # ,'getMessage', 'levelname', 'levelno', 'lineno', 'message', 'module', + # 'msecs', 'msg', 'name', 'pathname', 'process', 'processName', + # 'relativeCreated', 'stack_info', 'thread', 'threadName'] + now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ") + log_record["time"] = now + log_record["level"] = record.levelname + log_record["component"] = record.name + + +def create_logger() -> None: + """Create logger, set level and define format.""" + log.setLevel(getattr(logging, LOGCONFIG.level)) + fileformat = set_log_format("veto") + stdoutformat = set_log_format() + set_log_handler(log, "file", fileformat) + set_log_handler(log, "stdout", stdoutformat) + + +def create_werkzeug_logger() -> None: + """Create werkzeug-logger, set level and define format.""" + werkzeugLog.setLevel(getattr(logging, LOGCONFIG.level)) + fileformat = set_log_format("veto") + stdoutformat = set_log_format() + set_log_handler(werkzeugLog, "file", fileformat) + set_log_handler(werkzeugLog, "stdout", stdoutformat) + + +def create_gunicorn_logger() -> None: + """Create gunicorn-logger, set level and define format.""" + gunicornLog.setLevel(getattr(logging, LOGCONFIG.level)) + fileformat = set_log_format("veto") + stdoutformat = set_log_format() + set_log_handler(gunicornLog, "file", fileformat) + set_log_handler(gunicornLog, "stdout", stdoutformat) + # gunicorn already has a lot of children logger, e.g gunicorn.http, + # gunicorn.access. These lines deactivate their default handlers. + # pylint: disable=E1101 + for name in logging.root.manager.loggerDict: + if "gunicorn." in name: + logging.getLogger(name).propagate = True + logging.getLogger(name).handlers = [] + + +create_logger() +create_werkzeug_logger() +create_gunicorn_logger() diff --git a/src/actinia_cloudevent_plugin/wsgi.py b/src/actinia_cloudevent_plugin/wsgi.py deleted file mode 100644 index c885f74..0000000 --- a/src/actinia_cloudevent_plugin/wsgi.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -"""Copyright (c) 2018-2024 mundialis GmbH & Co. KG. - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . -""" - -__license__ = "GPLv3" -__author__ = "Carmen Tawalika, Anika Weinmann" -__copyright__ = "Copyright 2022 mundialis GmbH & Co. KG" -__maintainer__ = "mundialis GmbH & Co. 
KG" diff --git a/tests/cloudevent_receiver_server.py b/tests/cloudevent_receiver_server.py new file mode 100644 index 0000000..cca0d1e --- /dev/null +++ b/tests/cloudevent_receiver_server.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +"""Helper script for generation server, which receives cloudevents.""" +# used from sdk-python +# -> https://github.com/cloudevents/sdk-python/blob/main/samples/http-json-cloudevents/json_sample_server.py + +from cloudevents.http import from_http +from flask import Flask, request + +app = Flask(__name__) + + +@app.route("/", methods=["POST"]) +def home(): + """Server for cloudevent receival.""" + # create a CloudEvent + event = from_http(request.headers, request.get_data()) + + # you can access cloudevent fields as seen below + print( + f"Found {event['id']} from {event['source']} with type " + f"{event['type']} and specversion {event['specversion']}", + ) + + return "", 204 + + +if __name__ == "__main__": + app.run(port=3000) diff --git a/tests/conftest.py b/tests/conftest.py index 4a0c230..e76e31b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -14,7 +14,6 @@ You should have received a copy of the GNU General Public License along with this program. If not, see . -Hello World test """ __license__ = "GPLv3" diff --git a/tests/examples/cloudevent_example.json b/tests/examples/cloudevent_example.json new file mode 100644 index 0000000..274e39e --- /dev/null +++ b/tests/examples/cloudevent_example.json @@ -0,0 +1,20 @@ +{ + "id":"e3525c6d-bbd8-404d-9fa3-1e421dc99c11", + "specversion":"1.0", + "source" : "/apps/ui", + "type":"com.mundialis.actinia.process.send", + "time":"2025-03-28T10:28:48Z", + "subject" : "nc_spm_08/PERMANENT", + "datacontenttype":"application/json", + "data":{ + "list": [ + { + "module": "r.slope.aspect", + "inputs": [{ + "param": "elevation", + "value": "elev_ned_30m@PERMANENT" + }] + } + ] + } +} diff --git a/tests/examples/cloudevent_example_return.json b/tests/examples/cloudevent_example_return.json new file mode 100644 index 0000000..029d90f --- /dev/null +++ b/tests/examples/cloudevent_example_return.json @@ -0,0 +1,12 @@ +{ + "id":"1132f3c5-bcec-4947-b676-1da32279316d", + "specversion":"1.0", + "source" : "/actinia-cloudevent-plugin", + "type":"com.mundialis.actinia.process.started", + "time":"2025-03-28T10:30:48Z", + "subject" : "nc_spm_08/PERMANENT", + "datacontenttype":"application/json", + "data":{ + "actinia_status_url": "todo, if needed" + } +} diff --git a/tests/integrationtests/__init__.py b/tests/integrationtests/__init__.py index 2ff4dcc..ede6cf9 100644 --- a/tests/integrationtests/__init__.py +++ b/tests/integrationtests/__init__.py @@ -1,4 +1,5 @@ """Integration tests of the actinia-example-plguin. -This package part provides the integration tests of the actinia-cloudevent-plugin. +This package part provides the integration tests +of the actinia-cloudevent-plugin. """ diff --git a/tests/integrationtests/test_cloudevent.py b/tests/integrationtests/test_cloudevent.py new file mode 100644 index 0000000..6d86412 --- /dev/null +++ b/tests/integrationtests/test_cloudevent.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python +"""Copyright (c) 2025 mundialis GmbH & Co. KG. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. +You should have received a copy of the GNU General Public License +along with this program. If not, see . +""" + +__license__ = "GPLv3" +__author__ = "Lina Krisztian" +__copyright__ = "Copyright 2025 mundialis GmbH & Co. KG" +__maintainer__ = "mundialis GmbH & Co. KG" + + +import json + +import pytest +from flask import Response + +from tests.testsuite import TestCase + +cloudevent_json = { + "id": "e3525c6d-bbd8-404d-9fa3-1e421dc99c11", + "specversion": "1.0", + "source": "/apps/ui", + "type": "com.mundialis.actinia.process.send", + "time": "2025-03-28T10:28:48Z", + "subject": "nc_spm_08/PERMANENT", + "datacontenttype": "application/json", + "data": { + "list": [ + { + "module": "r.slope.aspect", + "inputs": [ + {"param": "elevation", "value": "elev_ned_30m@PERMANENT"}, + ], + }, + ], + }, +} + + +class CloudeventTest(TestCase): + """Cloudevent test class for / endpoint.""" + + @pytest.mark.integrationtest + def test_post_cloudevent(self) -> None: + """Test the post method of the / endpoint.""" + # Expected outcome + # (Note: returned cloudevent id, changes for each request) + # Lenght of response + resp_length = 152 + # Start of response (and according string index) + resp_start = ( + "Received event e3525c6d-bbd8-404d-9fa3-1e421dc99c11" + " and returned event " + ) + resp_start_ind = 71 + # End of response (and according string index) + resp_end = "with actinia-job _." + resp_end_ind = 108 + + # Test post method + resp = self.app.post( + f"{self.URL_PREFIX}/", + data=json.dumps(cloudevent_json), + content_type="application/json", + ) + assert isinstance( + resp, + Response, + ), "The response is not of type Response" + assert resp.status_code == 200, "The status code is not 200" + assert hasattr(resp, "json"), "The response has no attribute 'json'" + assert ( + "message" in resp.json + ), "There is no 'message' inside the response" + assert len(resp.json["message"]) == resp_length, ( + "The length of response message is wrong. " + f"{len(resp.json['message'])}, instead of {resp_length}." + ) + assert resp.json["message"][:resp_start_ind] == resp_start, ( + "The start of response message is wrong. " + f"'{resp.json['message'][:resp_start_ind]}', " + f"instead of '{resp_start}'." + ) + assert resp.json["message"][resp_end_ind:] == resp_end, ( + "The end of response message is wrong. " + f"'{resp.json['message'][resp_end_ind:]}', " + f"instead of '{resp_end}'." + ) + + @pytest.mark.integrationtest + def test_get_cloudevent(self) -> None: + """Test the get method of the / endpoint.""" + resp = self.app.get(f"{self.URL_PREFIX}/") + assert isinstance( + resp, + Response, + ), "The response is not of type Response" + assert ( + resp.status_code == 405 + ), f"The status code is not 405 but {resp.status_code}." + assert hasattr(resp, "json"), "The response has no attribute 'json'" + assert ( + "message" in resp.json + ), "There is no 'message' inside the response" + assert resp.json["message"] == "Method Not Allowed", ( + f"The response is wrong. 
'{resp.json['message']}'," + "instead of 'Method Not Allowed'" + ) diff --git a/tests/integrationtests/test_helloworld.py b/tests/integrationtests/test_helloworld.py deleted file mode 100644 index 383cd64..0000000 --- a/tests/integrationtests/test_helloworld.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env python -"""Copyright (c) 2018-2025 mundialis GmbH & Co. KG. - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -Hello World test -""" - -__license__ = "GPLv3" -__author__ = "Anika Weinmann" -__copyright__ = "Copyright 2022 mundialis GmbH & Co. KG" -__maintainer__ = "mundialis GmbH & Co. KG" - - -import json - -import pytest -from actinia_api import URL_PREFIX -from flask import Response - -from tests.testsuite import ActiniaTestCase - - -class ActiniaHelloWorldTest(ActiniaTestCase): - """Actinia hello world test class for hello world endpoint.""" - - @pytest.mark.integrationtest - def test_get_helloworld(self) -> None: - """Test the get method of the /helloworld endpoint.""" - resp = self.app.get(f"{URL_PREFIX}/helloworld") - - assert isinstance( - resp, - Response, - ), "The response is not of type Response" - assert resp.status_code == 200, "The status code is not 200" - assert hasattr(resp, "json"), "The response has no attribute 'json'" - assert ( - "message" in resp.json - ), "There is no 'message' inside the response" - assert ( - resp.json["message"] == "Hello world!" - ), "The response message is wrong" - - @pytest.mark.integrationtest - def test_post_helloworld(self) -> None: - """Test the post method of the /helloworld endpoint.""" - postbody = {"name": "test"} - resp = self.app.post( - f"{URL_PREFIX}/helloworld", - headers=self.user_auth_header, - data=json.dumps(postbody), - content_type="application/json", - ) - assert isinstance( - resp, - Response, - ), "The response is not of type Response" - assert resp.status_code == 200, "The status code is not 200" - assert hasattr(resp, "json"), "The response has no attribute 'json'" - assert ( - "message" in resp.json - ), "There is no 'message' inside the response" - assert ( - resp.json["message"] == "Hello world! Hello world TEST!" 
- ), "The response message is wrong" - - @pytest.mark.integrationtest - def test_post_helloworld_error(self) -> None: - """Test the post method of the /helloworld endpoint.""" - postbody = {"namee": "test"} - resp = self.app.post( - f"{URL_PREFIX}/helloworld", - headers=self.user_auth_header, - data=json.dumps(postbody), - content_type="application/json", - ) - assert isinstance( - resp, - Response, - ), "The response is not of type Response" - assert resp.status_code == 400, "The status code is not 400" - assert resp.data == b"Missing name in JSON content" diff --git a/tests/integrationtests/test_projecthelloworld.py b/tests/integrationtests/test_projecthelloworld.py deleted file mode 100644 index 954faac..0000000 --- a/tests/integrationtests/test_projecthelloworld.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python -"""Copyright (c) 2018-2025 mundialis GmbH & Co. KG. - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -Hello World test -""" - -__license__ = "GPLv3" -__author__ = "Anika Weinmann" -__copyright__ = "Copyright 2022 mundialis GmbH & Co. KG" -__maintainer__ = "mundialis GmbH & Co. KG" - - -import json - -import pytest -from actinia_api import URL_PREFIX -from flask import Response - -from tests.testsuite import ActiniaTestCase - - -class ActiniaHelloWorldTest(ActiniaTestCase): - """Actinia hello world test class for hello world endpoint.""" - - @pytest.mark.integrationtest - def test_get_helloworld(self) -> None: - """Test get method of /helloworld/projects/ endpoint.""" - resp = self.app.get( - f"{URL_PREFIX}/helloworld/{self.project_url_part}/project1", - ) - - assert isinstance( - resp, - Response, - ), "The response is not of type Response" - assert resp.status_code == 200, "The status code is not 200" - assert hasattr(resp, "json"), "The response has no attribute 'json'" - assert ( - "message" in resp.json - ), "There is no 'message' inside the response" - assert ( - resp.json["message"] == "Project: Hello world! project1" - ), "The response message is wrong" - - @pytest.mark.integrationtest - def test_post_helloworld(self) -> None: - """Test post method of /helloworld/projects/ endpoint.""" - postbody = {"name": "test"} - resp = self.app.post( - f"{URL_PREFIX}/helloworld/{self.project_url_part}/project1", - headers=self.user_auth_header, - data=json.dumps(postbody), - content_type="application/json", - ) - assert isinstance( - resp, - Response, - ), "The response is not of type Response" - assert resp.status_code == 200, "The status code is not 200" - assert hasattr(resp, "json"), "The response has no attribute 'json'" - assert ( - "message" in resp.json - ), "There is no 'message' inside the response" - assert resp.json["message"] == ( - "Project: Hello world! Hello world TEST! 
project1" - ), "The response message is wrong" - - @pytest.mark.integrationtest - def test_post_helloworld_error(self) -> None: - """Test post method of /helloworld/projects/ endpoint.""" - postbody = {"namee": "test"} - resp = self.app.post( - f"{URL_PREFIX}/helloworld/{self.project_url_part}/project1", - headers=self.user_auth_header, - data=json.dumps(postbody), - content_type="application/json", - ) - assert isinstance( - resp, - Response, - ), "The response is not of type Response" - assert resp.status_code == 400, "The status code is not 400" - assert resp.data == b"Missing name in JSON content" - - @pytest.mark.integrationtest - def test_redirecting_deprecated_locations_endpoint(self) -> None: - """Test redirecting of deprecated locations to projects endpoint.""" - if self.grass_version >= [8, 4]: - resp = self.app.get( - f"{URL_PREFIX}/helloworld/locations/project1", - ) - assert isinstance( - resp, - Response, - ), "The response is not of type Response" - # self.app.get is following redirects - assert resp.status_code == 200, "The status code is not 200" - # remove beginning of URL e.g. http://localhost or http://127.0.0.1 - resp_location = "/" + "/".join(resp.location.split("/")[3:]) - assert ( - resp_location == f"{URL_PREFIX}/helloworld/projects/project1" - ), ( - "The deprecated locations endpoint " - "is not forwarded to projects endpoint" - ) - - @pytest.mark.integrationtest - def test_projects_endpoint_for_lt_g84(self) -> None: - """Test non-supported project endpoint for GRASS versions < g84.""" - if self.grass_version < [8, 4]: - resp = self.app.get( - f"{URL_PREFIX}/helloworld/projects/project1", - ) - assert isinstance( - resp, - Response, - ), "The response is not of type Response" - assert resp.status_code == 404, "The status code is not 404" - assert resp.json["message"] == ( - "Not Found. The requested URL " - "is only available from " - "GRASS GIS version 8.4." - ), f"Wrong return message: {resp.data}" diff --git a/tests/test_resource_base.py b/tests/test_resource_base.py deleted file mode 100644 index cfaf226..0000000 --- a/tests/test_resource_base.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env python -"""Copyright (c) 2016-2022 mundialis GmbH & Co. KG. - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -Tests: Actinia resource test case base -""" - -from __future__ import annotations - -import atexit -import base64 -import os -import signal -import tempfile -import time -from pathlib import Path - -from actinia_core.core.common.config import global_config -from actinia_core.core.common.user import ActiniaUser -from actinia_core.endpoints import create_endpoints -from actinia_core.testsuite import ActiniaTestCaseBase -from werkzeug.datastructures import Headers - -__license__ = "GPLv3" -__author__ = "Sören Gebbert, Anika Weinmann" -__copyright__ = ( - "Copyright 2016-2022, Sören Gebbert and mundialis GmbH & Co. KG" -) -__maintainer__ = "mundialis GmbH & Co. 
KG" - -# Create endpoints -create_endpoints() - -KVDB_PID = None -SERVER_TEST = False -CUSTOM_ACTINIA_CFG = False - -# If this environmental variable is set, then a real http request will be send -# instead of using the flask test_client. -if "ACTINIA_SERVER_TEST" in os.environ: - SERVER_TEST = bool(os.environ["ACTINIA_SERVER_TEST"]) -# Set this variable to use a actinia config file in a docker container -if "ACTINIA_CUSTOM_TEST_CFG" in os.environ: - CUSTOM_ACTINIA_CFG = str(os.environ["ACTINIA_CUSTOM_TEST_CFG"]) - - -def setup_environment() -> None: - """Setuo test environment.""" - global KVDB_PID - # Set the port to the test kvdb server - global_config.KVDB_SERVER_SERVER = "localhost" - global_config.KVDB_SERVER_PORT = 7000 - # Set the path to kvdb WORKER_LOGFILE - # global_config.WORKER_LOGFILE = "/var/log/kvdb/kvdb" - - # home = os.getenv("HOME") - - # GRASS GIS - # Setup the test environment - global_config.GRASS_GIS_BASE = "/usr/local/grass/" - global_config.GRASS_GIS_START_SCRIPT = "/usr/local/bin/grass" - # global_config.GRASS_DATABASE= "/usr/local/grass_test_db" - # global_config.GRASS_DATABASE = "%s/actinia/grass_test_db" % home - global_config.GRASS_TMP_DATABASE = tempfile.TemporaryDirectory().name - Path(global_config.GRASS_TMP_DATABASE).mkdir(parents=True) - - if SERVER_TEST is False and CUSTOM_ACTINIA_CFG is False: - # Start the kvdb server for user and logging management - KVDB_PID = os.spawnl( - os.P_NOWAIT, - "/usr/bin/valkey-server", - "common/valkey.conf", - f"--port {global_config.KVDB_SERVER_PORT}", - ) - time.sleep(1) - - if SERVER_TEST is False and CUSTOM_ACTINIA_CFG is not False: - global_config.read(CUSTOM_ACTINIA_CFG) - - -def stop_kvdb() -> None: - """Stop kvdb server.""" - # Kill th kvdb server - if SERVER_TEST is False and KVDB_PID is not None: - os.kill(KVDB_PID, signal.SIGTERM) - - -# Register the kvdb stop function -atexit.register(stop_kvdb) -# Setup the environment -setup_environment() - - -class ActiniaResourceTestCaseBase(ActiniaTestCaseBase): - """Actinia resource test case base class.""" - - @classmethod - def create_user( - cls, - name: str = "guest", - role: str = "guest", - group: str = "group", - password: str = "abcdefgh", - accessible_datasets: dict[str, list | None] | None = None, - process_num_limit: int = 1000, - process_time_limit: int = 6000, - accessible_modules: list[str] | None = None, - ) -> (str, str, Headers()): - """Create actinia user.""" - auth = bytes(f"{name}:{password}", "utf-8") - - # We need to create an HTML basic authorization header - cls.auth_header[role] = Headers() - cls.auth_header[role].add( - "Authorization", - f"Basic {base64.b64encode(auth).decode()}", - ) - - # Make sure the user database is empty - user = ActiniaUser(name) - if user.exists(): - user.delete() - # Create a user in the database - user = ActiniaUser.create_user( - name, - group, - password, - user_role=role, - accessible_datasets=accessible_datasets, - process_num_limit=process_num_limit, - process_time_limit=process_time_limit, - ) - if accessible_modules is None: - accessible_modules = ["sleep"] - user.add_accessible_modules(accessible_modules) - user.update() - cls.users_list.append(user) - - return name, group, cls.auth_header[role] diff --git a/tests/testsuite.py b/tests/testsuite.py index ab5b4af..9189618 100644 --- a/tests/testsuite.py +++ b/tests/testsuite.py @@ -13,8 +13,6 @@ You should have received a copy of the GNU General Public License along with this program. If not, see . 
- -Base class for GRASS GIS REST API tests """ from __future__ import annotations @@ -24,39 +22,15 @@ __copyright__ = "Copyright 2018-2022 mundialis GmbH & Co. KG" __maintainer__ = "mundialis GmbH & Co. KG" -import base64 import unittest -from typing import ClassVar - -import pwgen -from actinia_core.core.common import kvdb_interface -from actinia_core.core.common.app import flask_app -from actinia_core.core.common.config import global_config -from actinia_core.core.common.user import ActiniaUser -from actinia_core.version import G_VERSION, init_versions -from werkzeug.datastructures import Headers +from actinia_cloudevent_plugin.main import flask_app -class ActiniaTestCase(unittest.TestCase): - """Actinia test case class.""" - # guest = None - # admin = None - # superadmin = None - user: str = None - auth_header: ClassVar[dict] = {} - users_list: ClassVar[list[str]] = [] - project_url_part: str = "projects" +class TestCase(unittest.TestCase): + """Test case class.""" - # set project_url_part to "locations" if GRASS GIS version < 8.4 - init_versions() - - grass_version_s: str = G_VERSION["version"] - grass_version: ClassVar[list[int]] = [ - int(item) for item in grass_version_s.split(".")[:2] - ] - if grass_version < [8, 4]: - project_url_part = "locations" + URL_PREFIX = "http://localhost:5000/api/v1" def setUp(self) -> None: """Overwrite method setUp from unittest.TestCase class.""" @@ -72,128 +46,6 @@ def setUp(self) -> None: flask_app.testing = True self.app = flask_app.test_client() - # Start and connect the kvdb interface - kvdb_args = ( - global_config.KVDB_SERVER_URL, - global_config.KVDB_SERVER_PORT, - ) - if ( - global_config.KVDB_SERVER_PW - and global_config.KVDB_SERVER_PW is not None - ): - kvdb_args = (*kvdb_args, global_config.KVDB_SERVER_PW) - kvdb_interface.connect(*kvdb_args) - - # create test user for roles user (more to come) - accessible_datasets = { - "nc_spm_08": ["PERMANENT", "user1", "modis_lst"], - } - password = pwgen.pwgen() - ( - self.user_id, - self.user_group, - self.user_auth_header, - ) = self.create_user( - name="user", - role="user", - password=password, - process_num_limit=3, - process_time_limit=4, - accessible_datasets=accessible_datasets, - ) - ( - self.restricted_user_id, - self.restricuted_user_group, - self.restricted_user_auth_header, - ) = self.create_user( - name="user2", - role="user", - password=password, - process_num_limit=3, - process_time_limit=4, - accessible_datasets=accessible_datasets, - accessible_modules=["v.db.select", "importer", "r.mapcalc"], - ) - ( - self.admin_id, - self.admin_group, - self.admin_auth_header, - ) = self.create_user( - name="admin", - role="admin", - password=password, - process_num_limit=3, - process_time_limit=4, - accessible_datasets=accessible_datasets, - ) - - # # create process queue - # from actinia_core.core.common.process_queue import \ - # create_process_queue - # create_process_queue(config=global_config) - def tearDown(self) -> None: """Overwrite method tearDown from unittest.TestCase class.""" self.app_context.pop() - - # remove test user; disconnect kvdb - for user in self.users_list: - user.delete() - kvdb_interface.disconnect() - - def create_user( - self, - name: str = "guest", - role: str = "guest", - group: str = "group", - password: str = "abcdefgh", - accessible_datasets: dict[str, list | None] | None = None, - process_num_limit: int = 1000, - process_time_limit: int = 6000, - accessible_modules: list[str] = global_config.MODULE_ALLOW_LIST, - ) -> (str, str, Headers()): - """Create actinia 
user.""" - auth = bytes(f"{name}:{password}", "utf-8") - # We need to create an HTML basic authorization header - self.auth_header[role] = Headers() - self.auth_header[role].add( - "Authorization", - f"Basic {base64.b64encode(auth).decode()}", - ) - - # Make sure the user database is empty - user = ActiniaUser(name) - if user.exists(): - user.delete() - # Create a user in the database - user = ActiniaUser.create_user( - name, - group, - password, - user_role=role, - accessible_datasets=accessible_datasets, - accessible_modules=accessible_modules, - process_num_limit=process_num_limit, - process_time_limit=process_time_limit, - ) - user.add_accessible_modules(["uname", "sleep"]) - self.users_list.append(user) - - return name, group, self.auth_header[role] - - -# def check_started_process(test_case: , resp: ) -> None: -# """Checks response of started process - TODO: can be enhanced.""" -# if isinstance(resp.json["process_results"], dict): -# resp.json["process_results"] = str(resp.json["process_results"]) -# resp_class = ProcessingResponseModel(**resp.json) -# assert resp_class["status"] == "accepted" -# status_url = resp_class["urls"]["status"] - -# # poll status_url -# # TODO: status stays in accepted -# status_resp = test_case.app.get( -# status_url, -# headers=test_case.user_auth_header, -# ) -# assert status_resp.json["urls"]["status"] == status_url diff --git a/tests/unittests/test_transformation.py b/tests/unittests/test_transformation.py deleted file mode 100644 index fb0e472..0000000 --- a/tests/unittests/test_transformation.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -"""Copyright (c) 2018-2024 mundialis GmbH & Co. KG. - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -First test -""" - -__license__ = "GPLv3" -__author__ = "Anika Weinmann" -__copyright__ = "Copyright 2022 mundialis GmbH & Co. KG" -__maintainer__ = "mundialis GmbH & Co. KG" - -import pytest - -from actinia_cloudevent_plugin.core.example import transform_input - - -@pytest.mark.unittest -@pytest.mark.parametrize( - ("inp", "ref_out"), - [("test", "Hello world TEST!"), ("bla23", "Hello world BLA23!")], -) -def test_transform_input(inp: str, ref_out: str) -> None: - """Test for tranform_input function.""" - out = transform_input(inp) - assert out == ref_out, f"Wrong result from transform_input for {inp}" diff --git a/tests_with_kvdb.sh b/tests_with_cloudevent_receiver.sh similarity index 53% rename from tests_with_kvdb.sh rename to tests_with_cloudevent_receiver.sh index 65d9aa3..091e3c4 100755 --- a/tests_with_kvdb.sh +++ b/tests_with_cloudevent_receiver.sh @@ -1,17 +1,9 @@ #!/usr/bin/env sh -# start kvdb server -valkey-server & +# start cloud event receiver server +python3 tests/cloudevent_receiver_server.py & +SERVER_PID=$! sleep 1 -valkey-cli ping - -# start webhook server -webhook-server --host "0.0.0.0" --port "5005" & -sleep 10 - -# run tests -echo "${ACTINIA_CUSTOM_TEST_CFG}" -echo "${DEFAULT_CONFIG_PATH}" if [ "$1" = "dev" ] then @@ -29,7 +21,7 @@ fi TEST_RES=$? 
-# stop kvdb server
-valkey-cli shutdown
+# stop cloud event receiver server once the tests have finished
+kill $SERVER_PID

 return $TEST_RES
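
Note (not part of the patch): the receiver added in tests/cloudevent_receiver_server.py listens on localhost:3000 and is started in the background by tests_with_cloudevent_receiver.sh. Below is a minimal sketch for exercising it by hand with the example event from tests/examples/cloudevent_example.json; it assumes the cloudevents and requests packages are installed and mirrors the client half of the sdk-python sample that the receiver script references.

#!/usr/bin/env python
"""Sketch (assumption, not in the patch): send the example cloudevent to the test receiver."""
import json
from pathlib import Path

import requests
from cloudevents.http import CloudEvent, to_structured

# Load the example event and split the CloudEvent attributes from the payload.
example = json.loads(Path("tests/examples/cloudevent_example.json").read_text())
data = example.pop("data")
event = CloudEvent(example, data)

# Serialize the event in structured mode and POST it to the receiver (port 3000,
# as configured in tests/cloudevent_receiver_server.py).
headers, body = to_structured(event)
resp = requests.post("http://localhost:3000/", data=body, headers=headers)
print(resp.status_code)  # the receiver answers 204 on success

Run it while the receiver is up (e.g. after "python3 tests/cloudevent_receiver_server.py &"); a 204 response confirms the server parsed the event and printed its id, source, type and specversion.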