diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..a3d4617
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,2 @@
+github: johanneswilm
+custom: https://www.fiduswriter.org/donate/
diff --git a/.shellspec b/.shellspec
new file mode 100644
index 0000000..d567ecf
--- /dev/null
+++ b/.shellspec
@@ -0,0 +1,12 @@
+--require spec_helper
+
+## Default kcov (coverage) options
+# --kcov-options "--include-path=. --path-strip-level=1"
+# --kcov-options "--include-pattern=.sh"
+# --kcov-options "--exclude-pattern=/.shellspec,/spec/,/coverage/,/report/"
+
+## Example: Include script "myprog" with no extension
+# --kcov-options "--include-pattern=.sh,myprog"
+
+## Example: Only specified files/directories
+# --kcov-options "--include-pattern=myprog,/lib/"
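+
+## Example: run the suite with coverage enabled (assumes shellspec and
+## kcov are installed):
+# shellspec --kcov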
diff --git a/README.md b/README.md
index ec51d3d..93955bf 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,9 @@
# fiduswriter-snap
-Snapcraft build instructions for Fidus Writer (work in progress)
+Snapcraft build instructions for Fidus Writer.
-Large parts of this package is based on [Snappy Nextcloud](https://github.com/nextcloud/nextcloud-snap)
+Large parts of this package are based on the snapcraft build instructions for [Nextcloud](https://github.com/nextcloud/nextcloud-snap).
+
+## How to install
+
+[![Get it from the Snap Store](https://snapcraft.io/static/images/badges/en/snap-store-black.svg)](https://snapcraft.io/fiduswriter)
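+
+With snapd available, you can also install from the command line:
+
+```sh
+sudo snap install fiduswriter
+```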
diff --git a/build_clean.sh b/build_clean.sh
new file mode 100755
index 0000000..c32af20
--- /dev/null
+++ b/build_clean.sh
@@ -0,0 +1,4 @@
+#!/bin/sh -e
+
+
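+# Full rebuild: clear previous build state first. The memory override is
+# needed because some parts (MySQL in particular) can exhaust the default
+# build-VM allowance.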
+snapcraft clean && SNAPCRAFT_BUILD_ENVIRONMENT_MEMORY=8G snapcraft
diff --git a/snap/gui/icon.svg b/snap/gui/icon.svg
new file mode 100644
index 0000000..d0e3b89
--- /dev/null
+++ b/snap/gui/icon.svg
@@ -0,0 +1,214 @@
diff --git a/snap/plugins/apache.py b/snap/plugins/apache.py
new file mode 100644
index 0000000..f62efa4
--- /dev/null
+++ b/snap/plugins/apache.py
@@ -0,0 +1,57 @@
+import subprocess
+import snapcraft.plugins.v1
+
+
+class ApachePlugin(snapcraft.plugins.v1.PluginV1):
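+ """Build Apache httpd from source with a statically linked module set.
+
+ The required `modules` property lists the httpd modules to compile in;
+ `mpm` picks the multi-processing module (default: event). See the
+ apache part in snap/snapcraft.yaml for the invocation used here.
+ """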
+
+ @classmethod
+ def schema(cls):
+ schema = super().schema()
+
+ schema['properties']['modules'] = {
+ 'type': 'array',
+ 'minItems': 1,
+ 'uniqueItems': True,
+ 'items': {
+ 'type': 'string'
+ },
+ }
+
+ schema['properties']['mpm'] = {
+ 'type': 'string',
+ 'default': 'event',
+ }
+
+ schema['required'] = ['modules']
+
+ return schema
+
+ @classmethod
+ def get_build_properties(cls):
+ # Inform Snapcraft of the properties associated with building. If these
+ # change in the YAML Snapcraft will consider the build step dirty.
+ return super().get_build_properties() + ["modules", "mpm"]
+
+ def __init__(self, name, options, project):
+ super().__init__(name, options, project)
+
+ self.build_packages.extend(
+ ['pkg-config', 'libapr1-dev', 'libaprutil1-dev', 'libpcre3-dev',
+ 'libssl-dev'])
+ self.stage_packages.extend(['libapr1', 'libaprutil1'])
+
+
+ def build(self):
+ super().build()
+
+ subprocess.check_call(
+ "./configure --prefix={} --with-mpm={} --enable-modules=none --enable-mods-static='{}'".format(
+ self.installdir, self.options.mpm,
+ ' '.join(self.options.modules)),
+ cwd=self.builddir, shell=True)
+
+ self.run(
+ ['make', '-j{}'.format(
+ self.project.parallel_build_count)],
+ cwd=self.builddir)
+ self.run(['make', 'install'], cwd=self.builddir)
diff --git a/snap/plugins/redis.py b/snap/plugins/redis.py
new file mode 100644
index 0000000..4f6bc85
--- /dev/null
+++ b/snap/plugins/redis.py
@@ -0,0 +1,27 @@
+import logging
+
+from snapcraft.plugins.v1 import make
+
+logger = logging.getLogger(__name__)
+
+
+class RedisPlugin(make.MakePlugin):
+
+ def build(self):
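+ # Deliberately call BasePlugin.build (not MakePlugin.build): we run make
+ # ourselves below so that PREFIX can be supplied at install time.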
+ super(make.MakePlugin, self).build()
+
+ command = ['make']
+
+ if self.options.makefile:
+ command.extend(['-f', self.options.makefile])
+
+ if self.options.make_parameters:
+ command.extend(self.options.make_parameters)
+
+ self.run(command + ['-j{}'.format(self.project.parallel_build_count)])
+ self.run(command + ['install', 'PREFIX=' + self.installdir])
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index a40af9d..7229c4f 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,17 +1,198 @@
name: fiduswriter
adopt-info: fiduswriter
summary: 'An academic collaborative word processor'
-base: core20
+base: core18
+confinement: strict
+grade: stable
+epoch: 3*
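+# (the trailing * means this epoch-3 snap can still read epoch-2 data,
+# giving existing installs an upgrade path across the breaking change)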
+
architectures:
- build-on: amd64
- build-on: arm64
- build-on: armhf
- build-on: ppc64el
- build-on: s390x
-confinement: strict
-grade: stable
-epoch: 3*
+
+apps:
+ # Apache daemon
+ apache:
+ command: bin/run-httpd -k start -DFOREGROUND
+ stop-command: bin/httpd-wrapper -k stop
+ reload-command: bin/httpd-wrapper -k graceful
+ daemon: simple
+ restart-condition: always
+ plugs: [network, network-bind, removable-media]
+
+ # MySQL daemon
+ mysql:
+ command: bin/start_mysql
+ stop-command: support-files/mysql.server stop
+ reload-command: bin/reload-mysql
+ daemon: simple
+ restart-condition: always
+ plugs: [network, network-bind]
+
+ # redis server daemon
+ redis-server:
+ command: bin/start-redis-server
+ daemon: simple
+ restart-condition: always
+ plugs: [network, network-bind]
+
+ # mDNS daemon
+ mdns-publisher:
+ command: bin/delay-on-failure mdns-publisher fiduswriter
+ daemon: simple
+ restart-condition: always
+ plugs: [network, network-bind]
+
+ # MySQL client
+ mysql-client:
+ command: bin/run-mysql
+ plugs: [network, network-bind]
+
+ mysqldump:
+ command: bin/run-mysqldump
+ plugs: [network, network-bind]
+
+ enable-https:
+ command: bin/enable-https
+ plugs: [network, network-bind]
+
+ disable-https:
+ command: bin/disable-https
+ plugs: [network, network-bind]
+
+ renew-certs:
+ command: bin/renew-certs
+ daemon: simple
+ restart-condition: always
+ plugs: [network, network-bind]
+
+ import:
+ command: bin/import-data
+ plugs: [network, network-bind, removable-media]
+
+ export:
+ command: bin/export-data
+ plugs: [network, network-bind, removable-media]
+
+ logrotate:
+ command: bin/run-logrotate
+ daemon: simple
+ restart-condition: on-failure
+ timer: 00:00 # Run once a day at midnight
+
+ daemon:
+ command: 'bin/run-fiduswriter.py'
+ daemon: simple
+ plugs:
+ - network-bind
+ - network-control
+ - network-observe
+
+ manage:
+ command: 'bin/manage.sh'
+ plugs:
+ - network-bind
+ - network-control
+ - network-observe
+ adapter: full
+
+ createsuperuser:
+ command: 'bin/manage.sh createsuperuser'
+
+ configure:
+ command: 'bin/configure.py'
+ plugs:
+ - network-bind
+ - network-control
+ - network-observe
+ adapter: full
+
+ languagetool:
+ command: bin/run-languagetool.py
+ environment:
+ JAVA_HOME: $SNAP/usr/lib/jvm/java-11-openjdk-$SNAP_ARCH
+ JAVA_BIN: $SNAP/usr/lib/jvm/java-11-openjdk-$SNAP_ARCH/bin/java
+ PATH: $JAVA_HOME/bin:$PATH
+ daemon: simple
+ restart-condition: always
+ plugs: [network-bind]
+
+hooks:
+ configure:
+ plugs: [network, network-bind]
+ pre-refresh:
+ plugs: [network, network-bind]
+
parts:
+ apache:
+ plugin: apache
+ source: http://ftp.wayne.edu/apache/httpd/httpd-2.4.49.tar.bz2
+ source-checksum: sha256/65b965d6890ea90d9706595e4b7b9365b5060bec8ea723449480b4769974133b
+
+ override-pull: |
+ snapcraftctl pull
+
+ # For some reason, all directories in (and after) 2.4.32 are setgid.
+ # Reported as https://bz.apache.org/bugzilla/show_bug.cgi?id=62298
+ # Work around by unsetting setgid. FIXME: Remove when bug is fixed.
+ find . -perm -g+s -exec chmod g-s {} \;
+
+ # The built-in Apache modules to enable
+ modules:
+ - headers
+ - proxy
+ - proxy_fcgi
+ - setenvif
+ - env
+ - rewrite
+ - mime
+ - dir
+ - authz_core
+ - unixd
+ - alias
+ - ssl
+ - socache_shmcb
+ - slotmem_shm
+ - log_config
+ - logio
+
+ filesets:
+ exclude:
+ - -man
+ - -manual
+ - -htdocs
+ - -include
+ - -build
+ - -conf/httpd.conf
+ - -conf/magic
+ - -conf/original
+ - -conf/extra
+ - -bin/apachectl
+ - -bin/envvars*
+ stage:
+ - $exclude
+ prime:
+ - $exclude
+
+ apache-customizations:
+ plugin: dump
+ source: src/apache/
+
+ redis:
+ plugin: redis
+ source: http://download.redis.io/releases/redis-6.0.15.tar.gz
+ source-checksum: sha256/4bc295264a95bc94423c162a9eee66135a24a51eefe5f53f18fc9bde5c3a9f74
+
+ redis-customizations:
+ plugin: dump
+ source: src/redis/
+ organize:
+ config/*: config/redis/
+ after: [envsubst]
+
fiduswriter:
plugin: python
parse-info:
@@ -21,7 +202,7 @@ parts:
after: [fiduswriter-patches, mysql]
override-build: | # Removing here rather than in stage below due to snapcraft issue https://forum.snapcraft.io/t/conflicting-files-sometimes/19818/2
snapcraftctl build
- rm $SNAPCRAFT_PART_INSTALL/lib/python3.8/site-packages/fiduswriter/configuration.py-default
+ rm $SNAPCRAFT_PART_INSTALL/lib/python3.6/site-packages/fiduswriter/configuration.py-default
override-pull: |
snapcraftctl pull
git apply $SNAPCRAFT_STAGE/fiduswriter-set-defaults.patch
@@ -50,14 +231,14 @@ parts:
- nano
# stage:
# # Remove scripts that we'll be replacing with our own
- # - -lib/python3.8/site-packages/fiduswriter/configuration.py-default
+ # - -lib/python3.6/site-packages/fiduswriter/configuration.py-default
fiduswriter-customizations:
plugin: dump
source: src/fiduswriter/
after: [fiduswriter]
organize:
- configuration.py: lib/python3.8/site-packages/fiduswriter/configuration.py-default
+ configuration.py: lib/python3.6/site-packages/fiduswriter/configuration.py-default
languagetool:
plugin: nil
@@ -82,8 +263,9 @@ parts:
source: src/languagetool/
npm:
- plugin: npm
- npm-node-version: 12.18.3
+ plugin: nodejs
+ nodejs-package-manager: npm
+ nodejs-version: 12.18.3
source: src/npm
prime:
- -package.json
@@ -95,14 +277,8 @@ parts:
# be updated if the version of MySQL changes.
boost:
plugin: dump
- source: https://github.com/kyrofa/boost_tarball/raw/master/boost_1_59_0.tar.gz
- source-checksum: sha1/5123209db194d66d69a9cfa5af8ff473d5941d97
- # When building MySQL, the headers in the source directory 'boost/' are
- # required. Previously, using the 'copy' plugin, the whole archive was put
- # under 'boost/', making the headers reside in 'boost/boost/'. Due to a bug,
- # we now only stage the 'boost/' directory without moving it.
- #
- # Bug: https://bugs.launchpad.net/snapcraft/+bug/1757093
+ source: https://sourceforge.net/projects/boost/files/boost/1.73.0/boost_1_73_0.tar.bz2
+ source-checksum: sha1/6d6ed02b29c860fd21b274fc4e1f820855e765e9
stage:
- boost/
prime:
@@ -110,68 +286,68 @@ parts:
mysql:
plugin: cmake
- source: https://github.com/mysql/mysql-server.git
- source-tag: mysql-5.7.31
- source-depth: 1
- override-pull: |
- snapcraftctl pull
- git apply $SNAPCRAFT_STAGE/mysql-support-compile-time-disabling-of-setpriority.patch
- after: [boost, mysql-patches]
- cmake-parameters:
- - -DWITH_BOOST=$SNAPCRAFT_STAGE
- - -DWITH_INNODB_PAGE_CLEANER_PRIORITY=OFF
+ after: [boost]
+
+ # Get from https://dev.mysql.com/downloads/mysql/
+ source: https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-boost-8.0.26.tar.gz
+ source-checksum: md5/3b3e641a80005dde29ad52b4d1649c6b
+ configflags:
- -DCMAKE_INSTALL_PREFIX=/
- -DBUILD_CONFIG=mysql_release
+ - -DCMAKE_BUILD_TYPE=Release
- -DWITH_UNIT_TESTS=OFF
- -DWITH_EMBEDDED_SERVER=OFF
- -DWITH_ARCHIVE_STORAGE_ENGINE=OFF
+ - -DWITH_EXAMPLE_STORAGE_ENGINE=OFF
- -DWITH_BLACKHOLE_STORAGE_ENGINE=OFF
- -DWITH_FEDERATED_STORAGE_ENGINE=OFF
- -DWITH_PARTITION_STORAGE_ENGINE=OFF
+ - -DWITH_PERFSCHEMA_STORAGE_ENGINE=OFF
+ - -DWITH_DEBUG=OFF
+ - -DWITH_INNODB_EXTRA_DEBUG=OFF
- -DINSTALL_MYSQLTESTDIR=
+ - -DDOWNLOAD_BOOST=OFF
+ - -DWITH_BOOST=$SNAPCRAFT_STAGE
+
+ override-build: |
+ snapcraftctl build
+ # MySQL v8 is massive. Strip it.
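+ # (with -I a binary file counts as having no matching lines, so grep
+ # succeeds and short-circuits for text files; strip is therefore only
+ # attempted on the real ELF executables)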
+ find "$SNAPCRAFT_PART_INSTALL/bin" -type f -exec sh -c 'grep -IL . "$1" || strip --strip-all "$1"' sh "{}" \;
+
build-packages:
- - pkg-config
- - wget
- - g++
- - make
- - cmake
- - bison
- libncurses5-dev
- - libaio-dev
- - libssl-dev
- stage-packages:
- - libaio1
+ - binutils
+ - gcc-8
+ - g++-8
+
stage:
# Remove scripts that we'll be replacing with our own
- -support-files/mysql.server
- -COPYING
- prime:
- # Remove scripts that we'll be replacing with our own
- - -support-files/mysql.server
# Remove unused binaries that waste space
+ - -bin/comp_err
+ - -bin/ibd2sdi
- -bin/innochecksum
- -bin/lz4_decompress
+ - -bin/myisam_ftdump
- -bin/myisam*
+ - -bin/mysql_client_test
+ - -bin/mysql_secure_installation
+ - -bin/mysql_ssl_rsa_setup
+ - -bin/mysql_tzinfo_to_sql
+ - -bin/mysql_upgrade
- -bin/mysqladmin
- -bin/mysqlbinlog
- - -bin/mysql_client_test
- - -bin/mysql_config*
+ - -bin/mysqlcheck
- -bin/mysqld_multi
- -bin/mysqlimport
- - -bin/mysql_install_db
- - -bin/mysql_plugin
- -bin/mysqlpump
- - -bin/mysql_secure_installation
+ - -bin/mysqlrouter*
- -bin/mysqlshow
- -bin/mysqlslap
- - -bin/mysql_ssl_rsa_setup
- - -bin/mysqltest
- - -bin/mysql_tzinfo_to_sql
+ - -bin/mysqltest*
- -bin/perror
- - -bin/replace
- - -bin/resolveip
- - -bin/resolve_stack_dump
- -bin/zlib_decompress
# Copy over our MySQL scripts
@@ -179,8 +355,8 @@ parts:
plugin: dump
source: src/mysql/
- mysql-patches:
- source: src/mysql-patches
+ patches:
+ source: src/patches
plugin: dump
prime:
- -*
@@ -191,78 +367,70 @@ parts:
prime:
- -*
+ mdns-publisher:
+ plugin: godeps
+ source: https://github.com/kyrofa/mdns-publisher.git
+ go-importpath: github.com/kyrofa/mdns-publisher
+
+ delay-on-failure:
+ plugin: dump
+ source: src/delay-on-failure/
+
+ certbot:
+ plugin: python
+ python-version: python2
+ source: src/https/
+ requirements: ["requirements.txt"]
+ build-packages: [libffi-dev]
+ after: [patches]
+ override-build: |
+ snapcraftctl build
+ patch -p1 -d $SNAPCRAFT_PART_INSTALL/lib/python2.7/site-packages/certbot < $SNAPCRAFT_STAGE/certbot-remove-default-config-files.patch
+ patch -p1 -d $SNAPCRAFT_PART_INSTALL/lib/python2.7/site-packages/certbot < $SNAPCRAFT_STAGE/certbot-remove-storage-chown.patch
+
+ setup-https:
+ plugin: dump
+ source: src/https/
+ stage-packages: [openssl]
+ stage: [-etc/ssl, -requirements.txt]
+
+ import-export:
+ plugin: dump
+ source: src/import-export
+ stage-packages: [rsync]
+
common:
plugin: dump
source: src/common/
-
hooks:
plugin: dump
source: src/hooks/
organize:
bin/: snap/hooks/
+ stage-packages: [curl]
-hooks:
- configure:
- plugs:
- - network-bind
- - network-control
- - network-observe
-
-apps:
- daemon:
- command: 'bin/run-fiduswriter.py'
- daemon: simple
- plugs:
- - network-bind
- - network-control
- - network-observe
-
- manage:
- command: 'bin/manage.sh'
- plugs:
- - network-bind
- - network-control
- - network-observe
- adapter: full
-
- createsuperuser:
- command: 'bin/manage.sh createsuperuser'
-
- configure:
- command: 'bin/configure.py'
- plugs:
- - network-bind
- - network-control
- - network-observe
- adapter: full
-
- # MySQL daemon
- mysql:
- command: bin/start_mysql
- stop-command: support-files/mysql.server stop
- daemon: simple
- restart-condition: always
- plugs: [network, network-bind]
-
- # MySQL client
- mysql-client:
- command: bin/run-mysql
- plugs: [network, network-bind]
+ logrotate:
+ plugin: dump
+ source: src/logrotate/
+ organize:
+ config/*: config/logrotate/
+ usr/sbin/*: bin/
+ stage-packages: [logrotate]
+ stage:
+ - bin/*
+ - config/*
+ - utilities/*
+ after: [envsubst]
- mysqldump:
- command: bin/dump-mysql
- plugs: [network, network-bind]
+ migrations:
+ plugin: dump
+ source: src/migrations/
- languagetool:
- command: bin/run-languagetool.py
- environment:
- JAVA_HOME: $SNAP/usr/lib/jvm/java-11-openjdk-$SNAP_ARCH
- JAVA_BIN: $SNAP/usr/lib/jvm/java-11-openjdk-$SNAP_ARCH/bin/java
- PATH: $JAVA_HOME/bin:$PATH
- daemon: simple
- restart-condition: always
- plugs: [network-bind]
+ envsubst:
+ plugin: nil
+ stage-packages: [gettext-base]
+ stage: [usr/bin/envsubst]
layout:
/etc/magic:
diff --git a/src/apache/bin/httpd-wrapper b/src/apache/bin/httpd-wrapper
new file mode 100755
index 0000000..7cf5824
--- /dev/null
+++ b/src/apache/bin/httpd-wrapper
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+# shellcheck source=src/https/utilities/https-utilities
+. "$SNAP/utilities/https-utilities"
+# shellcheck source=src/apache/utilities/apache-utilities
+. "$SNAP/utilities/apache-utilities"
+# shellcheck source=src/hooks/utilities/configuration-utilities
+. "$SNAP/utilities/configuration-utilities"
+
+params=""
+if certificates_are_active; then
+ echo "Certificates have been activated: using HTTPS only"
+ params="$params -DEnableHTTPS"
+
+ # Enable HSTS if possible
+ if should_enable_hsts; then
+ echo "Certificates look to be in order: enabling HSTS"
+ params="$params -DEnableHSTS"
+ else
+ echo "Certificates appear self-signed: disabling HSTS"
+ fi
+else
+ echo "No certificates are active: using HTTP only"
+fi
+
+if debug_mode_enabled; then
+ params="$params -DDebug"
+fi
+
+HTTP_PORT="$(apache_http_port)"
+HTTPS_PORT="$(apache_https_port)"
+export HTTP_PORT
+export HTTPS_PORT
+
+# Disable shellcheck for quoting params, since we're building a command line
+# and these need to be separated by spaces
+# shellcheck disable=SC2086
+httpd -d "$SNAP" $params "$@"
diff --git a/src/apache/bin/run-httpd b/src/apache/bin/run-httpd
new file mode 100755
index 0000000..f93853b
--- /dev/null
+++ b/src/apache/bin/run-httpd
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+# shellcheck source=src/https/utilities/https-utilities
+. "$SNAP/utilities/https-utilities"
+
+mkdir -p "$SNAP_DATA/logs"
+chmod 750 "$SNAP_DATA/logs"
+
+# Make sure Nextcloud is installed and running
+echo "Making sure nextcloud is setup..."
+if ! setup-nextcloud; then
+ echo "Failed to setup nextcloud"
+ exit 1
+fi
+
+echo "All set! Running httpd..."
+
+# Rewrite live cert symlinks that aren't using the current symlink.
+# FIXME: Remove this migration once epochs and upgrade hooks are available.
+if certificates_are_active; then
+ live_basename="$(basename "$(realpath "$LIVE_CERTS_DIRECTORY")")"
+ self_signed_basename="$(basename "$SELF_SIGNED_DIRECTORY")"
+ custom_basename="$(basename "$CUSTOM_DIRECTORY")"
+ if [ "$live_basename" = "$self_signed_basename" ]; then
+ activate_self_signed_certificate
+ elif [ "$live_basename" = "$custom_basename" ]; then
+ activate_custom_certificate
+ else
+ activate_certbot_certificate
+ fi
+fi
+
+httpd-wrapper "$@"
diff --git a/src/apache/conf/httpd.conf b/src/apache/conf/httpd.conf
new file mode 100644
index 0000000..83c960e
--- /dev/null
+++ b/src/apache/conf/httpd.conf
@@ -0,0 +1,185 @@
+# ServerRoot: The top of the directory tree under which the server's
+# configuration, error, and log files are kept.
+#
+# Do not add a slash at the end of the directory path. If you point
+# ServerRoot at a non-local disk, be sure to specify a local disk on the
+# Mutex directive, if file-based mutexes are used. If you wish to share the
+# same ServerRoot for multiple httpd daemons, you will need to change at
+# least PidFile.
+#
+ServerRoot "${SNAP}"
+
+#
+# Listen: Allows you to bind Apache to specific IP addresses and/or
+# ports, instead of the default. See also the <VirtualHost>
+# directive.
+#
+# Change this to Listen on specific IP addresses as shown below to
+# prevent Apache from glomming onto all bound IP addresses.
+#
+#Listen 12.34.56.78:80
+Listen ${HTTP_PORT}
+
+#
+# Mutex: Allows you to set the mutex mechanism and mutex file directory
+# for individual mutexes, or change the global defaults
+#
+# Using pthread here, since Apache tries to chown the file-based mutex
+# which isn't allowed in Snappy, and Ubuntu supports robust pthread
+# mutexes that can be recovered if the child process terminates
+# abnormally.
+#
+Mutex pthread
+
+#
+# PidFile: Allows you to place the pidfile in a specific location.
+PidFile "${APACHE_PIDFILE}"
+
+#
+# Dynamic Shared Object (DSO) Support
+#
+# To be able to use the functionality of a module which was built as a DSO you
+# have to place corresponding `LoadModule' lines at this location so the
+# directives contained in it are actually available _before_ they are used.
+# Statically compiled modules (those listed by `httpd -l') do not need
+# to be loaded here.
+#
+# Example:
+# LoadModule foo_module modules/mod_foo.so
+#
+
+# 'Main' server configuration
+#
+# The directives in this section set up the values used by the 'main'
+# server, which responds to any requests that aren't handled by a
+# <VirtualHost> definition. These values also provide defaults for
+# any <VirtualHost> containers you may define later in the file.
+#
+# All of these directives may appear inside <VirtualHost> containers,
+# in which case these default settings will be overridden for the
+# virtual host being defined.
+#
+
+#
+# Deny access to the entirety of your server's filesystem. You must
+# explicitly permit access to web content directories in other
+# <Directory> blocks below.
+#
+<Directory />
+ AllowOverride none
+ Require all denied
+</Directory>
+
+#
+# Note that from this point forward you must specifically allow
+# particular features to be enabled - so if something's not working as
+# you might expect, make sure that you have specifically enabled it
+# below.
+#
+
+#
+# DocumentRoot: The directory out of which you will serve your
+# documents. By default, all requests are taken from this directory, but
+# symbolic links and aliases may be used to point to other locations.
+#
+DocumentRoot "${SNAP}/htdocs"
+<Directory "${SNAP}/htdocs">
+ #
+ # Possible values for the Options directive are "None", "All",
+ # or any combination of:
+ # Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews
+ #
+ # Note that "MultiViews" must be named *explicitly* --- "Options All"
+ # doesn't give it to you.
+ #
+ # The Options directive is both complicated and important. Please see
+ # http://httpd.apache.org/docs/2.4/mod/core.html#options
+ # for more information.
+ #
+ Options FollowSymLinks
+
+ #
+ # AllowOverride controls what directives may be placed in .htaccess files.
+ # It can be "All", "None", or any combination of the keywords:
+ # AllowOverride FileInfo AuthConfig Limit
+ #
+ AllowOverride None
+
+ #
+ # Controls who can get stuff from this server.
+ #
+ Require all granted
+
+ # Include Nextcloud's .htaccess file directly. In a typical setup this would
+ # be dangerous since it increases the capability of the .htaccess file in
+ # case an attacker was able to modify it, but that's not actually possible
+ # on Snappy (since the .htaccess file is read-only) so we'll do it here so
+ # as to avoid manually copying it in and needing to maintain it.
+ Include ${SNAP}/htdocs/.htaccess
+</Directory>
+
+# Serve static assets for apps in a writable location.
+Alias "/extra-apps" "${SNAP_DATA}/nextcloud/extra-apps"
+<Directory "${SNAP_DATA}/nextcloud/extra-apps">
+ AllowOverride None
+ Require all granted
+</Directory>
+
+# Serve ACME authentication data (Let's Encrypt).
+Alias "/.well-known/acme-challenge" "${SNAP_DATA}/certs/certbot/.well-known/acme-challenge"
+<Directory "${SNAP_DATA}/certs/certbot/.well-known/acme-challenge">
+ AllowOverride None
+ Require all granted
+</Directory>
+
+#
+# The following lines prevent .htaccess and .htpasswd files from being
+# viewed by Web clients.
+#
+<Files ".ht*">
+ Require all denied
+</Files>
+
+# The "combined" format is taken from the Ubuntu Apache config
+LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
+
+#
+# Default log location. If you define an error logfile for a
+# container, that host's errors will be logged there and not here.
+#
+ErrorLog "${SNAP_DATA}/logs/apache_errors.log"
+CustomLog "${SNAP_DATA}/logs/apache_access.log" combined
+
+#
+# LogLevel: Control the number of messages logged to the error_log.
+# Possible values include: debug, info, notice, warn, error, crit,
+# alert, emerg.
+#
+LogLevel warn
+
+#
+# TypesConfig points to the file containing the list of mappings from
+# filename extension to MIME-type.
+#
+TypesConfig conf/mime.types
+
+# Disable HTTP TRACE method.
+TraceEnable off
+
+# Disable HTTP TRACK method.
+RewriteEngine On
+RewriteCond %{REQUEST_METHOD} ^TRACK
+RewriteRule .* - [R=405,L]
+
+# Disable debug tokens and signature unless debug mode is requested
+ServerTokens Prod
+ServerSignature Off
+<IfDefine Debug>
+ ServerTokens Full
+ ServerSignature On
+</IfDefine>
+
+# Only enable SSL if requested
+<IfDefine EnableHTTPS>
+ Include ${SNAP}/conf/ssl.conf
+</IfDefine>
diff --git a/src/apache/conf/ssl.conf b/src/apache/conf/ssl.conf
new file mode 100644
index 0000000..232ca34
--- /dev/null
+++ b/src/apache/conf/ssl.conf
@@ -0,0 +1,171 @@
+#
+# Listen: Allows you to bind Apache to specific IP addresses and/or
+# ports, instead of the default. See also the <VirtualHost>
+# directive.
+#
+# Change this to Listen on specific IP addresses as shown below to
+# prevent Apache from glomming onto all bound IP addresses.
+#
+#Listen 12.34.56.78:80
+Listen ${HTTPS_PORT}
+
+#
+# Dynamic Shared Object (DSO) Support
+#
+# To be able to use the functionality of a module which was built as a DSO you
+# have to place corresponding `LoadModule' lines at this location so the
+# directives contained in it are actually available _before_ they are used.
+# Statically compiled modules (those listed by `httpd -l') do not need
+# to be loaded here.
+#
+# Example:
+# LoadModule foo_module modules/mod_foo.so
+#
+
+# 'Main' server configuration
+#
+# The directives in this section set up the values used by the 'main'
+# server, which responds to any requests that aren't handled by a
+# <VirtualHost> definition. These values also provide defaults for
+# any <VirtualHost> containers you may define later in the file.
+#
+# All of these directives may appear inside <VirtualHost> containers,
+# in which case these default settings will be overridden for the
+# virtual host being defined.
+#
+
+# SSL Protocol support:
+# List the protocol versions which clients are allowed to connect with.
+# Disable SSLv3 by default (cf. RFC 7525 3.1.1). TLSv1 (1.0) should be
+# disabled as quickly as practical. By the end of 2016, only the TLSv1.2
+# protocol or later should remain in use.
+SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1
+SSLProxyProtocol all -SSLv3 -TLSv1 -TLSv1.1
+
+# Pass Phrase Dialog:
+# Configure the pass phrase gathering process.
+# The filtering dialog program (`builtin' is an internal
+# terminal dialog) has to provide the pass phrase on stdout.
+SSLPassPhraseDialog builtin
+
+# Inter-Process Session Cache:
+# Configure the SSL Session Cache: First the mechanism
+# to use and second the expiring timeout (in seconds).
+SSLSessionCache "shmcb:${SNAP_DATA}/apache/ssl_scache(512000)"
+SSLSessionCacheTimeout 300
+
+# Pseudo Random Number Generator (PRNG):
+# Configure one or more sources to seed the PRNG of the SSL library.
+# The seed data should be of good random quality.
+# WARNING! On some platforms /dev/random blocks if not enough entropy
+# is available. This means you then cannot use the /dev/random device
+# because it would lead to very long connection times (as long as
+# it requires to make more entropy available). But usually those
+# platforms additionally provide a /dev/urandom device which doesn't
+# block. So, if available, use this one instead. Read the mod_ssl User
+# Manual for more details.
+#
+SSLRandomSeed startup builtin
+SSLRandomSeed startup file:/dev/urandom 512
+SSLRandomSeed connect builtin
+SSLRandomSeed connect file:/dev/urandom 512
+
+# Virtual host for HTTP. All it does is redirect to HTTPS.
+<VirtualHost *:${HTTP_PORT}>
+ RewriteEngine on
+ # Disable HTTP TRACK method.
+ RewriteCond %{REQUEST_METHOD} ^TRACK
+ RewriteRule .* - [R=405,L]
+ # Do not redirect Let's Encrypt challenge requests
+ RewriteCond %{REQUEST_URI} !^/.well-known/acme-challenge/.*
+ # Redirect everything else to HTTPS
+ RewriteRule ^ https://%{SERVER_NAME}:${HTTPS_PORT}%{REQUEST_URI} [END,QSA,R=permanent]
+</VirtualHost>
+
+# Virtual host for HTTPS.
+<VirtualHost *:${HTTPS_PORT}>
+
+ # Disable HTTP TRACK method.
+ RewriteEngine On
+ RewriteCond %{REQUEST_METHOD} ^TRACK
+ RewriteRule .* - [R=405,L]
+
+ SSLEngine on
+ SSLHonorCipherOrder On
+ SSLCipherSuite ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:DHE-RSA-AES256-GCM-SHA384
+
+ # Ensure perfect forward secrecy isn't compromised; the server doesn't
+ # necessarily restart regularly.
+ SSLSessionTickets off
+
+ SSLCertificateFile ${SNAP_DATA}/certs/live/cert.pem
+ SSLCertificateKeyFile ${SNAP_DATA}/certs/live/privkey.pem
+ SSLCertificateChainFile ${SNAP_DATA}/certs/live/chain.pem
+
+ # SSL Engine Options:
+ # Set various options for the SSL engine.
+ # o FakeBasicAuth:
+ # Translate the client X.509 into a Basic Authorisation. This means that
+ # the standard Auth/DBMAuth methods can be used for access control. The
+ # user name is the `one line' version of the client's X.509 certificate.
+ # Note that no password is obtained from the user. Every entry in the user
+ # file needs this password: `xxj31ZMTZzkVA'.
+ # o ExportCertData:
+ # This exports two additional environment variables: SSL_CLIENT_CERT and
+ # SSL_SERVER_CERT. These contain the PEM-encoded certificates of the
+ # server (always existing) and the client (only existing when client
+ # authentication is used). This can be used to import the certificates
+ # into CGI scripts.
+ # o StdEnvVars:
+ # This exports the standard SSL/TLS related `SSL_*' environment variables.
+ # Per default this exportation is switched off for performance reasons,
+ # because the extraction step is an expensive operation and is usually
+ # useless for serving static content. So one usually enables the
+ # exportation for CGI and SSI requests only.
+ # o StrictRequire:
+ # This denies access when "SSLRequireSSL" or "SSLRequire" applied even
+ # under a "Satisfy any" situation, i.e. when it applies access is denied
+ # and no other module can change it.
+ # o OptRenegotiate:
+ # This enables optimized SSL connection renegotiation handling when SSL
+ # directives are used in per-directory context.
+ <FilesMatch "\.(cgi|shtml|phtml|php)$">
+ SSLOptions +StdEnvVars
+ </FilesMatch>
+ <Directory "${SNAP}/cgi-bin">
+ SSLOptions +StdEnvVars
+ </Directory>
+
+ # SSL Protocol Adjustments:
+ # The safe and default but still SSL/TLS standard compliant shutdown
+ # approach is that mod_ssl sends the close notify alert but doesn't wait for
+ # the close notify alert from client. When you need a different shutdown
+ # approach you can use one of the following variables:
+ # o ssl-unclean-shutdown:
+ # This forces an unclean shutdown when the connection is closed, i.e. no
+ # SSL close notify alert is sent or allowed to be received. This violates
+ # the SSL/TLS standard but is needed for some brain-dead browsers. Use
+ # this when you receive I/O errors because of the standard approach where
+ # mod_ssl sends the close notify alert.
+ # o ssl-accurate-shutdown:
+ # This forces an accurate shutdown when the connection is closed, i.e. a
+ # SSL close notify alert is send and mod_ssl waits for the close notify
+ # alert of the client. This is 100% SSL/TLS standard compliant, but in
+ # practice often causes hanging connections with brain-dead browsers. Use
+ # this only for browsers where you know that their SSL implementation
+ # works correctly.
+ # Notice: Most problems of broken clients are also related to the HTTP
+ # keep-alive facility, so you usually additionally want to disable
+ # keep-alive for those clients, too. Use variable "nokeepalive" for this.
+ # Similarly, one has to force some clients to use HTTP/1.0 to workaround
+ # their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and
+ # "force-response-1.0" for this.
+ BrowserMatch "MSIE [2-5]" \
+ nokeepalive ssl-unclean-shutdown \
+ downgrade-1.0 force-response-1.0
+
+ # Enable HSTS only if requested
+ <IfDefine EnableHSTS>
+ Header always set Strict-Transport-Security "max-age=63072000; includeSubdomains"
+ </IfDefine>
+</VirtualHost>
diff --git a/src/apache/utilities/apache-utilities b/src/apache/utilities/apache-utilities
new file mode 100755
index 0000000..b21c3dd
--- /dev/null
+++ b/src/apache/utilities/apache-utilities
@@ -0,0 +1,104 @@
+#!/bin/sh
+
+# shellcheck source=src/common/utilities/common-utilities
+. "$SNAP/utilities/common-utilities"
+
+DEFAULT_HTTP_PORT="80"
+DEFAULT_HTTPS_PORT="443"
+export APACHE_PIDFILE="/tmp/pids/httpd.pid"
+
+mkdir -p "$(dirname $APACHE_PIDFILE)"
+chmod 750 "$(dirname $APACHE_PIDFILE)"
+
+restart_apache_if_running()
+{
+ if apache_is_running; then
+ # Restart apache by stopping it and letting systemd start it again.
+ pid="$(apache_pid)"
+ printf "Restarting apache... "
+ if output="$(httpd-wrapper -k stop 2>&1)"; then
+ while kill -0 "$pid" 2>/dev/null; do
+ sleep 1
+ done
+ printf "done\n"
+ else
+ printf "error\n"
+ echo "$output"
+ return 1
+ fi
+ fi
+}
+
+apache_is_running()
+{
+ [ -f "$APACHE_PIDFILE" ]
+}
+
+wait_for_apache()
+{
+ wait_for_command "Waiting for Apache" apache_is_running
+}
+
+apache_pid()
+{
+ if apache_is_running; then
+ cat "$APACHE_PIDFILE"
+ else
+ echo "Unable to get Apache PID as it's not yet running" >&2
+ echo ""
+ fi
+}
+
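+# Ports are kept in snap configuration, so an admin can change them with,
+# e.g., `sudo snap set fiduswriter ports.http=81`; the snapctl calls below
+# read and write that same store.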
+apache_http_port()
+{
+ port="$(snapctl get ports.http)"
+ if [ -z "$port" ]; then
+ port="$DEFAULT_HTTP_PORT"
+ apache_set_http_port $port
+ apache_set_previous_http_port $port
+ fi
+
+ echo "$port"
+}
+
+apache_set_http_port()
+{
+ snapctl set ports.http="$1"
+}
+
+apache_https_port()
+{
+ port="$(snapctl get ports.https)"
+ if [ -z "$port" ]; then
+ port="$DEFAULT_HTTPS_PORT"
+ apache_set_https_port $port
+ apache_set_previous_https_port $port
+ fi
+
+ echo "$port"
+}
+
+apache_set_https_port()
+{
+ snapctl set ports.https="$1"
+}
+
+apache_previous_http_port()
+{
+ snapctl get private.ports.http
+}
+
+apache_set_previous_http_port()
+{
+ snapctl set private.ports.http="$1"
+}
+
+apache_previous_https_port()
+{
+ snapctl get private.ports.https
+}
+
+apache_set_previous_https_port()
+{
+ snapctl set private.ports.https="$1"
+}
diff --git a/src/common/utilities/common-utilities b/src/common/utilities/common-utilities
index 4698ac6..235a19a 100755
--- a/src/common/utilities/common-utilities
+++ b/src/common/utilities/common-utilities
@@ -1,5 +1,10 @@
#!/bin/sh
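+# The ../current symlinks stay stable across refreshes, unlike the
+# per-revision $SNAP and $SNAP_DATA paths they are resolved from.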
+SNAP_CURRENT="$(realpath -s "$SNAP/../current")"
+SNAP_DATA_CURRENT="$(realpath -s "$SNAP_DATA/../current")"
+export SNAP_CURRENT
+export SNAP_DATA_CURRENT
+
stdout_is_a_terminal()
{
[ -t 1 ]
@@ -50,12 +55,41 @@ wait_for_command()
fi
}
+get_previous_snap_version()
+{
+ snapctl get private.snap.version
+}
+
+set_previous_snap_version()
+{
+ snapctl set private.snap.version="$1"
+}
+
enable_maintenance_mode()
{
- run_command "Enabling maintenance mode" occ -n maintenance:mode --on
+ if run_command "Enabling maintenance mode" occ -n maintenance:mode --on; then
+ # The opcache might cache changes for one second. Wait for two to be safe.
+ sleep 2
+ return 0
+ fi
+ return 1
}
disable_maintenance_mode()
{
- run_command "Disabling maintenance mode" occ -n maintenance:mode --off
+ if run_command "Disabling maintenance mode" occ -n maintenance:mode --off; then
+ # The opcache might cache changes for one second. Wait for two to be safe.
+ sleep 2
+ return 0
+ fi
+ return 1
+}
+
+version_less_than()
+{
+ if [ "$1" = "$2" ]; then
+ return 1
+ fi
+
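+ # GNU sort -VC exits 0 iff its input is already in version order, so with
+ # equality excluded above this succeeds exactly when $1 < $2,
+ # e.g. version_less_than 3.9.2 3.10.0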
+ printf "%s\n%s" "$1" "$2" | sort -VC
}
diff --git a/src/delay-on-failure/bin/delay-on-failure b/src/delay-on-failure/bin/delay-on-failure
new file mode 100755
index 0000000..d4ed00a
--- /dev/null
+++ b/src/delay-on-failure/bin/delay-on-failure
@@ -0,0 +1,7 @@
+#!/bin/sh
+
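+# Run "$@", but pause before reporting failure so a service with
+# restart-condition: always isn't respawned in a tight loop, e.g.:
+# delay-on-failure mdns-publisher fiduswriter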
+if ! "$@"; then
+ sleep 10 # Don't exit right away, so we'll be respawned
+ exit 1
+fi
+exit 0
diff --git a/src/hooks/bin/post-refresh b/src/hooks/bin/post-refresh
index 8fdffe0..cac3820 100755
--- a/src/hooks/bin/post-refresh
+++ b/src/hooks/bin/post-refresh
@@ -1,7 +1,9 @@
#!/usr/bin/env python3
import os
import shutil
+import sys
+from subprocess import check_output, CalledProcessError
+SNAP = os.environ.get('SNAP')
SNAP_DATA = os.environ.get('SNAP_DATA')
TRANSPILE_DIR = '{}/.transpile'.format(SNAP_DATA)
@@ -10,3 +12,10 @@ if os.path.isdir(TRANSPILE_DIR):
shutil.rmtree(TRANSPILE_DIR)
while os.path.exists(TRANSPILE_DIR): # Wait until the folder is gone
pass
+
+try:
+ check_output([
+ '{}/bin/run-snap-migrations'.format(SNAP),
+ ])
+except CalledProcessError:
+ sys.exit(1)
diff --git a/src/hooks/bin/pre-refresh b/src/hooks/bin/pre-refresh
new file mode 100755
index 0000000..7a25a71
--- /dev/null
+++ b/src/hooks/bin/pre-refresh
@@ -0,0 +1,7 @@
+#!/bin/sh -e
+
+# shellcheck source=src/apache/utilities/apache-utilities
+. "$SNAP/utilities/apache-utilities"
+
+# By waiting for Apache we ensure that Fidus Writer is set up and fully updated
+wait_for_apache
diff --git a/src/hooks/utilities/configuration-utilities b/src/hooks/utilities/configuration-utilities
new file mode 100755
index 0000000..c6f04c1
--- /dev/null
+++ b/src/hooks/utilities/configuration-utilities
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+DEFAULT_MODE="production"
+
+debug_mode_enabled()
+{
+ [ "$(_get_mode)" = "debug" ]
+}
+
+enable_debug_mode()
+{
+ _set_mode "debug"
+}
+
+production_mode_enabled()
+{
+ [ "$(_get_mode)" = "production" ]
+}
+
+enable_production_mode()
+{
+ _set_mode "production"
+}
+
+mode_has_changed()
+{
+ [ "$(_get_mode)" != "$(_get_previous_mode)" ]
+}
+
+_get_mode()
+{
+ mode="$(snapctl get mode)"
+ if [ -z "$mode" ]; then
+ mode="$DEFAULT_MODE"
+ _set_mode "$mode"
+ fi
+
+ echo "$mode"
+}
+
+_set_mode()
+{
+ snapctl set mode="$1"
+ _set_previous_mode "$1"
+}
+
+_get_previous_mode()
+{
+ snapctl get private.mode
+}
+
+_set_previous_mode()
+{
+ snapctl set private.mode="$1"
+}
\ No newline at end of file
diff --git a/src/https/bin/disable-https b/src/https/bin/disable-https
new file mode 100755
index 0000000..66b9d6a
--- /dev/null
+++ b/src/https/bin/disable-https
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# shellcheck source=src/https/utilities/https-utilities
+. "$SNAP/utilities/https-utilities"
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "This utility needs to run as root"
+ exit 1
+fi
+
+if certificates_are_active; then
+ printf "Deactivating HTTPS... "
+ deactivate_certificates
+ printf "done\n"
+ restart_apache_if_running
+else
+ echo "HTTPS doesn't seem enabled"
+ exit 1
+fi
diff --git a/src/https/bin/enable-https b/src/https/bin/enable-https
new file mode 100755
index 0000000..0816fed
--- /dev/null
+++ b/src/https/bin/enable-https
@@ -0,0 +1,272 @@
+#!/bin/sh
+
+# shellcheck source=src/https/utilities/https-utilities
+. "$SNAP/utilities/https-utilities"
+
+COMMAND="fiduswriter.enable-https"
+
+print_usage()
+{
+ echo "Usage:"
+ echo " $COMMAND -h"
+ echo " Display this help message."
+ echo ""
+ echo " $COMMAND [OPTIONS]"
+ echo " Run the provided subcommand."
+ echo ""
+ echo "Available subcommands:"
+ echo " lets-encrypt [OPTIONS]"
+ echo " Obtain a certificate from Let's Encrypt and automatically keep it"
+ echo " up-to-date."
+ echo ""
+ echo " self-signed"
+ echo " Generate and use a self-signed certificate. This is easier to"
+ echo " setup than Let's Encrypt certificates, but will cause warnings in"
+ echo " browsers."
+ echo ""
+ echo " custom [OPTIONS]"
+ echo " Use certificates generated by other means."
+}
+
+handle_lets_encrypt()
+{
+ extra_params=""
+ dry_run=false
+
+ while getopts ":dth" opt; do
+ case $opt in
+ d)
+ extra_params="$extra_params --dry-run"
+ dry_run=true
+ ;;
+ t) extra_params="$extra_params --test-cert";;
+ h)
+ echo "Usage:"
+ echo " $COMMAND lets-encrypt [-h -t -d]"
+ echo ""
+ echo " Obtain a certificate from Let's Encrypt and"
+ echo " automatically keep it up to date."
+ echo ""
+ echo " -h: Display this help message"
+ echo " -t: Obtain a test certificate. This is a valid Let's"
+ echo " Encrypt certificate, but is not signed by a"
+ echo " recognized CA, so browsers will show a warning."
+ echo " -d: Dry run: don't actually obtain/install"
+ echo " certificates from Let's Encrypt, but make sure"
+ echo " it's possible."
+ exit 0
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ esac
+ done
+
+ echo "In order for Let's Encrypt to verify that you actually own the"
+ echo "domain(s) for which you're requesting a certificate, there are a"
+ echo "number of requirements of which you need to be aware:"
+ echo ""
+
+ echo "1. In order to register with the Let's Encrypt ACME server, you must"
+ echo " agree to the currently-in-effect Subscriber Agreement located"
+ echo " here:"
+ echo ""
+ echo " https://letsencrypt.org/repository/"
+ echo ""
+ echo " By continuing to use this tool you agree to these terms. Please"
+ echo " cancel now if otherwise."
+ echo ""
+
+ echo "2. You must have the domain name(s) for which you want certificates"
+ echo " pointing at the external IP address of this machine."
+ echo ""
+
+ echo "3. Both ports 80 and 443 on the external IP address of this machine"
+ echo " must point to this machine (e.g. port forwarding might need to be"
+ echo " setup on your router)."
+ echo ""
+
+ while true; do
+ printf "Have you met these requirements? (y/n) "
+ read -r answer
+ case $answer in
+ [Yy]* ) break;;
+ [Nn]* ) exit;;
+ * ) echo "Please answer yes or no.";;
+ esac
+ done
+
+ printf "Please enter an email address (for urgent notices or key recovery): "
+ read -r email
+
+ domains=""
+ printf "Please enter your domain name(s) (space-separated): "
+ read -r answer
+ for domain in $answer; do
+ domains="$domains -d $domain"
+ done
+
+ printf "Attempting to obtain certificates... "
+ # Building CLI commands, so we don't WANT to quote some of these (they need
+ # to be separated by whitespace): disable the check
+ # shellcheck disable=SC2086
+ if ! output="$(run_certbot certonly $extra_params \
+ --authenticator webroot \
+ --webroot-path "$CERTBOT_DIRECTORY" \
+ --rsa-key-size 4096 \
+ --email "$email" \
+ --non-interactive \
+ --agree-tos \
+ --force-renewal \
+ $domains 2>&1)"; then
+ printf "error running certbot:\n\n" >&2
+ echo "$output" >&2
+ exit 1
+ fi
+
+ echo "done"
+ if [ "$dry_run" = true ]; then
+ echo "Looks like you're ready for HTTPS!"
+ else
+ activate_certbot_certificate
+ fi
+}
+
+handle_self_signed()
+{
+ while getopts ":h" opt; do
+ case $opt in
+ h)
+ echo "Usage:"
+ echo " $COMMAND self-signed [-h]"
+ echo ""
+ echo " Generate and use a self-signed certificate. This is"
+ echo " easier to setup than Let's Encrypt certificates, but"
+ echo " will cause warnings in browsers."
+ echo ""
+ echo " -h: Display this help message"
+ exit 0
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ esac
+ done
+
+ printf "Generating key and self-signed certificate... "
+ if ! output="$(generate_self_signed_certificate 2>&1)"; then
+ printf "error:\n" >&2
+ echo "$output" >&2
+ exit 1
+ fi
+
+ printf "done\n"
+ activate_self_signed_certificate
+}
+
+handle_custom()
+{
+ enable_hsts=false
+
+ while getopts ":hs" opt; do
+ case $opt in
+ h)
+ echo "Usage:"
+ echo " $COMMAND custom [-h -s] "
+ echo ""
+ echo " Use certificates generated by other means. Note that"
+ echo " the files provided to this command must be readable"
+ echo " by the snap, which means they must contained in one"
+ echo " of four directory trees:"
+ echo " - $SNAP_CURRENT"
+ echo " - $SNAP_COMMON"
+ echo " - $SNAP_USER_DATA"
+ echo " - $SNAP_USER_COMMON"
+ echo ""
+ echo " Also note that this command will create copies of the"
+ echo " files provided; if this command completes"
+ echo " successfully, they can be safely removed."
+ echo ""
+ echo " -h: Display this help message."
+ echo " -s: Enable HTTP Strict Transport Security (HSTS)"
+ echo " (default is off-- leave off if self-signed)."
+ exit 0
+ ;;
+ s)
+ enable_hsts=true
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ esac
+ done
+ shift $((OPTIND-1))
+
+ if [ $# -ne 3 ]; then
+ printf "This subcommand requires three positional parameters: " >&2
+ printf "\n" >&2
+ exit 1
+ fi
+
+ certificate=$1
+ private_key=$2
+ chain=$3
+
+ printf "Installing custom certificate... "
+ if ! output="$(install_custom_certificate \
+ "$certificate" "$private_key" "$chain" \
+ "$enable_hsts" 2>&1)"; then
+ echo "error:" >&2
+ echo "$output" >&2
+ exit 1
+ fi
+
+ printf "done\n"
+ activate_custom_certificate
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "This utility needs to run as root"
+ exit 1
+fi
+
+# Parse options for the base command
+while getopts ":h" opt; do
+ case $opt in
+ h)
+ print_usage
+ exit 0
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+
+if [ $# = 0 ]; then
+ echo "Missing subcommand. Run '$COMMAND -h' for help." >&2
+ exit 1
+fi
+
+subcommand=$1
+shift # Remove subcommand from args
+case $subcommand in
+ lets-encrypt)
+ handle_lets_encrypt "$@"
+ ;;
+ self-signed)
+ handle_self_signed "$@"
+ ;;
+ custom)
+ handle_custom "$@"
+ ;;
+ *)
+ echo "No such subcommand: $subcommand. Run '$COMMAND -h' for help." >&2
+ exit 1
+ ;;
+esac
diff --git a/src/https/bin/renew-certs b/src/https/bin/renew-certs
new file mode 100755
index 0000000..fc52bd3
--- /dev/null
+++ b/src/https/bin/renew-certs
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+# shellcheck source=src/https/utilities/https-utilities
+. "$SNAP/utilities/https-utilities"
+
+# The number of seconds remaining in the validity of the certificate
+# before renewing it. 2592000 seconds is 30 days.
+seconds_to_renew=2592000
+
+while true; do
+ if [ -f "$SELF_SIGNED_CERT" ]; then
+ # Check the self-signed certificate. Does it need to be renewed?
+ cert_date="$(openssl x509 -noout -enddate -in "$SELF_SIGNED_CERT" | sed -e 's/.*=\(.*\)$/\1/')"
+ cert_date="$(date -d "$cert_date" "+%s")"
+ current_date=$(date "+%s")
+ difference=$((cert_date-current_date))
+ if [ $difference -lt $seconds_to_renew ]; then
+ echo "Renewing self-signed certificate"
+ generate_self_signed_certificate
+ restart_apache_if_running
+ else
+ echo "Self-signed certificates aren't due for renewal"
+ fi
+ fi
+
+ # No need to check the Let's Encrypt certificates; they'll only
+ # renew if they're within 30 days of expiration.
+ run_certbot renew --post-hook "restart-apache"
+
+ sleep 1d # Run once a day
+done
diff --git a/src/https/bin/restart-apache b/src/https/bin/restart-apache
new file mode 100755
index 0000000..e106321
--- /dev/null
+++ b/src/https/bin/restart-apache
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# shellcheck source=src/https/utilities/https-utilities
+. "$SNAP/utilities/https-utilities"
+
+restart_apache_if_running
diff --git a/src/https/certbot_fiduswriter_plugin/__init__.py b/src/https/certbot_fiduswriter_plugin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/https/certbot_fiduswriter_plugin/webroot.py b/src/https/certbot_fiduswriter_plugin/webroot.py
new file mode 100644
index 0000000..a9bc15f
--- /dev/null
+++ b/src/https/certbot_fiduswriter_plugin/webroot.py
@@ -0,0 +1,141 @@
+"""Fidus Writer Webroot plugin."""
+import argparse
+import collections
+import errno
+import json
+import logging
+import os
+
+import six
+import zope.component
+import zope.interface
+
+from acme import challenges
+
+from certbot import cli
+from certbot import errors
+from certbot import interfaces
+from certbot.display import util as display_util
+from certbot.plugins import common
+
+
+logger = logging.getLogger(__name__)
+
+
+@zope.interface.implementer(interfaces.IAuthenticator)
+@zope.interface.provider(interfaces.IPluginFactory)
+class Authenticator(common.Plugin):
+ """Fidus Writer Webroot Authenticator."""
+
+ description = "Place files in webroot directory without running chown"
+
+ MORE_INFO = """\
+Authenticator plugin that performs http-01 challenge by saving
+necessary validation resources to appropriate paths on the file
+system. It expects that there is some other HTTP server configured
+to serve all files under specified web root ({0})."""
+
+ def more_info(self): # pylint: disable=missing-docstring,no-self-use
+ return self.MORE_INFO.format(self.conf("path"))
+
+ @classmethod
+ def add_parser_arguments(cls, add):
+ add("path", type=str, default='', help="public_html / webroot path")
+ add("map", default={}, help="Not used. Left for backward compatibility.")
+
+ def get_chall_pref(self, domain): # pragma: no cover
+ # pylint: disable=missing-docstring,no-self-use,unused-argument
+ return [challenges.HTTP01]
+
+ def __init__(self, *args, **kwargs):
+ super(Authenticator, self).__init__(*args, **kwargs)
+ self.full_roots = {}
+ self.performed = collections.defaultdict(set)
+
+ def prepare(self): # pylint: disable=missing-docstring
+ pass
+
+ def perform(self, achalls): # pylint: disable=missing-docstring
+ webroot_path = self.conf("path")
+ if not webroot_path:
+ raise errors.PluginError("Missing path")
+
+ # The previous version had this as an array, but it gets loaded as
+ # a string. Just strip off the braces and quotes.
+ setattr(self.config, self.dest("path"), webroot_path.strip("[]'"))
+ logger.info("Using the webroot path %s for all domains.",
+ self.conf("path"))
+
+ self._create_challenge_dirs(achalls)
+
+ return [self._perform_single(achall) for achall in achalls]
+
+ def _create_challenge_dirs(self, achalls):
+ for achall in achalls:
+ self.full_roots[achall.domain] = os.path.join(
+ self.conf("path"), challenges.HTTP01.URI_ROOT_PATH)
+
+ logger.debug("Creating root challenges validation dir at %s",
+ self.conf("path"))
+
+ # Change the permissions to be writable (GH #1389)
+ # Umask is used instead of chmod to ensure the client can also
+ # run as non-root (GH #1795)
+ old_umask = os.umask(0o022)
+
+ try:
+ # This is coupled with the "umask" call above because
+ # os.makedirs's "mode" parameter may not always work:
+ # https://stackoverflow.com/questions/5231901/permission-problems-when-creating-a-dir-with-os-makedirs-python
+ os.makedirs(self.full_roots[achall.domain], 0o0755)
+
+ except OSError as exception:
+ if exception.errno != errno.EEXIST:
+ raise errors.PluginError(
+ "Couldn't create root for {0} http-01 "
+ "challenge responses: {1}", achall.domain, exception)
+ finally:
+ os.umask(old_umask)
+
+ def _get_validation_path(self, root_path, achall):
+ return os.path.join(root_path, achall.chall.encode("token"))
+
+ def _perform_single(self, achall):
+ response, validation = achall.response_and_validation()
+
+ root_path = self.full_roots[achall.domain]
+ validation_path = self._get_validation_path(root_path, achall)
+ logger.debug("Attempting to save validation to %s", validation_path)
+
+ # Change permissions to be world-readable, owner-writable (GH #1795)
+ old_umask = os.umask(0o022)
+
+ try:
+ with open(validation_path, "wb") as validation_file:
+ validation_file.write(validation.encode())
+ finally:
+ os.umask(old_umask)
+
+ self.performed[root_path].add(achall)
+
+ return response
+
+ def cleanup(self, achalls): # pylint: disable=missing-docstring
+ for achall in achalls:
+ root_path = self.full_roots.get(achall.domain, None)
+ if root_path is not None:
+ validation_path = self._get_validation_path(root_path, achall)
+ logger.debug("Removing %s", validation_path)
+ os.remove(validation_path)
+ self.performed[root_path].remove(achall)
+
+ for root_path, achalls in six.iteritems(self.performed):
+ if not achalls:
+ try:
+ os.rmdir(root_path)
+ logger.debug("All challenges cleaned up, removing %s",
+ root_path)
+ except OSError as exc:
+ logger.info(
+ "Unable to clean up challenge directory %s", root_path)
+ logger.debug("Error was: %s", exc)
diff --git a/src/https/requirements.txt b/src/https/requirements.txt
new file mode 100644
index 0000000..9504c54
--- /dev/null
+++ b/src/https/requirements.txt
@@ -0,0 +1,30 @@
+acme==0.33.1
+asn1crypto==0.24.0
+certbot==0.33.1
+certifi==2019.3.9
+cffi==1.12.3
+chardet==3.0.4
+ConfigArgParse==0.14.0
+configobj==5.0.6
+cryptography==2.6.1
+future==0.17.1
+idna==2.8
+josepy==1.1.0
+mock==2.0.0
+parsedatetime==2.4
+pbr==5.1.3
+pycparser==2.19
+pyOpenSSL==19.0.0
+pyRFC3339==1.1
+pytz==2019.1
+requests==2.21.0
+requests-toolbelt==0.9.1
+six==1.12.0
+urllib3==1.24.2
+zope.component==4.5
+zope.deferredimport==4.3
+zope.deprecation==4.4.0
+zope.event==4.4
+zope.hookable==4.2.0
+zope.interface==4.6.0
+zope.proxy==4.3.1
diff --git a/src/https/setup.py b/src/https/setup.py
new file mode 100644
index 0000000..e58fd03
--- /dev/null
+++ b/src/https/setup.py
@@ -0,0 +1,16 @@
+from setuptools import setup, find_packages
+
+
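+# Registering under the "certbot.plugins" entry point group is what lets
+# bin/enable-https select this authenticator via --authenticator webroot.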
+setup(
+ name='fiduswriter',
+ packages=find_packages(),
+ install_requires=[
+ 'certbot',
+ 'zope.interface',
+ ],
+ entry_points={
+ 'certbot.plugins': [
+ 'webroot = certbot_fiduswriter_plugin.webroot:Authenticator',
+ ],
+ },
+)
diff --git a/src/https/utilities/https-utilities b/src/https/utilities/https-utilities
new file mode 100755
index 0000000..a9edebe
--- /dev/null
+++ b/src/https/utilities/https-utilities
@@ -0,0 +1,140 @@
+#!/bin/sh
+
+# shellcheck source=src/apache/utilities/apache-utilities
+. "$SNAP/utilities/apache-utilities"
+
+SNAP_CURRENT="$(dirname "$SNAP_DATA")/current"
+
+LIVE_CERTS_DIRECTORY="$SNAP_CURRENT/certs/live"
+
+SELF_SIGNED_DIRECTORY="$SNAP_CURRENT/certs/self-signed"
+SELF_SIGNED_KEY="$SELF_SIGNED_DIRECTORY/privkey.pem"
+SELF_SIGNED_CERT="$SELF_SIGNED_DIRECTORY/cert.pem"
+SELF_SIGNED_CHAIN="$SELF_SIGNED_DIRECTORY/chain.pem"
+
+CUSTOM_DIRECTORY="$SNAP_CURRENT/certs/custom"
+CUSTOM_KEY="$CUSTOM_DIRECTORY/privkey.pem"
+CUSTOM_CERT="$CUSTOM_DIRECTORY/cert.pem"
+CUSTOM_CHAIN="$CUSTOM_DIRECTORY/chain.pem"
+CUSTOM_ENABLE_HSTS="$CUSTOM_DIRECTORY/hsts"
+
+CERTBOT_DIRECTORY="$SNAP_CURRENT/certs/certbot"
+CERTBOT_LIVE_DIRECTORY="$CERTBOT_DIRECTORY/config/live"
+
+# If this function is run multiple times it will replace the certificate
+# and key if they're already present.
+generate_self_signed_certificate()
+{
+ mkdir -p "$(dirname "$SELF_SIGNED_KEY")"
+ mkdir -p "$(dirname "$SELF_SIGNED_CERT")"
+ mkdir -p "$(dirname "$SELF_SIGNED_CHAIN")"
+ chmod 750 "$(dirname "$SELF_SIGNED_KEY")"
+ chmod 750 "$(dirname "$SELF_SIGNED_CERT")"
+ chmod 750 "$(dirname "$SELF_SIGNED_CHAIN")"
+
+ openssl req -newkey rsa:4096 -nodes -keyout "$SELF_SIGNED_KEY" \
+ -x509 -days 90 -out "$SELF_SIGNED_CERT" -subj "/O=FidusWriter"
+
+ rm -f "$SELF_SIGNED_CHAIN"
+ ln -s "$SELF_SIGNED_CERT" "$SELF_SIGNED_CHAIN"
+}
+
+activate_self_signed_certificate()
+{
+ deactivate_certificates
+ ln -s "$SELF_SIGNED_DIRECTORY" "$LIVE_CERTS_DIRECTORY"
+ restart_apache_if_running
+}
+
+self_signed_certificates_are_active()
+{
+ live_path="$(realpath "$LIVE_CERTS_DIRECTORY")"
+ self_signed_path="$(realpath "$SELF_SIGNED_DIRECTORY")"
+
+ [ "$live_path" = "$self_signed_path" ]
+}
+
+# If this function is run multiple times it will replace the certificate
+# and key if they're already present.
+install_custom_certificate()
+{
+ enable_hsts=$4
+
+ mkdir -p "$(dirname "$CUSTOM_KEY")"
+ mkdir -p "$(dirname "$CUSTOM_CERT")"
+ mkdir -p "$(dirname "$CUSTOM_CHAIN")"
+ chmod 750 "$(dirname "$CUSTOM_KEY")"
+ chmod 750 "$(dirname "$CUSTOM_CERT")"
+ chmod 750 "$(dirname "$CUSTOM_CHAIN")"
+
+ cp "$1" "$CUSTOM_CERT"
+ cp "$2" "$CUSTOM_KEY"
+ cp "$3" "$CUSTOM_CHAIN"
+
+ if [ "$enable_hsts" = true ]; then
+ touch "$CUSTOM_ENABLE_HSTS"
+ else
+ rm -f "$CUSTOM_ENABLE_HSTS"
+ fi
+}
+
+activate_custom_certificate()
+{
+ deactivate_certificates
+ ln -s "$CUSTOM_DIRECTORY" "$LIVE_CERTS_DIRECTORY"
+ restart_apache_if_running
+}
+
+custom_certificates_are_active()
+{
+ live_path="$(realpath "$LIVE_CERTS_DIRECTORY")"
+ custom_path="$(realpath "$CUSTOM_DIRECTORY")"
+
+ [ "$live_path" = "$custom_path" ]
+}
+
+certificates_are_active()
+{
+ [ -e "$LIVE_CERTS_DIRECTORY" ]
+}
+
+deactivate_certificates()
+{
+ rm -rf "$LIVE_CERTS_DIRECTORY"
+}
+
+activate_certbot_certificate()
+{
+ # There shouldn't be multiple domains here since we have no way to
+ # support them, but account for the possibility by simply taking the
+ # first domain's certificates. Ignore any READMEs.
+ certdir="$(find "$CERTBOT_LIVE_DIRECTORY" -maxdepth 1 -mindepth 1 -not -iname readme -printf "%P\n" | sort -n | head -1)"
+
+ deactivate_certificates
+ ln -s "$CERTBOT_LIVE_DIRECTORY/$certdir" "$LIVE_CERTS_DIRECTORY"
+ restart_apache_if_running
+}
+
+should_enable_hsts()
+{
+ # Don't enable HSTS for self-signed certs
+ if self_signed_certificates_are_active; then
+ return 1
+ fi
+
+ # Don't enable HSTS for custom certificates unless requested
+ if custom_certificates_are_active && [ ! -f "$CUSTOM_ENABLE_HSTS" ]; then
+ return 1
+ fi
+
+ # For everything else (i.e. Let's Encrypt), enable it.
+ return 0
+}
+
+# Run a certbot instance that writes to snap-writable data.
+run_certbot()
+{
+ certbot --text --config-dir "$CERTBOT_DIRECTORY/config" \
+ --work-dir "$CERTBOT_DIRECTORY/work" \
+ --logs-dir "$CERTBOT_DIRECTORY/logs" "$@"
+}
diff --git a/src/import-export/bin/export-data b/src/import-export/bin/export-data
new file mode 100755
index 0000000..5e41547
--- /dev/null
+++ b/src/import-export/bin/export-data
@@ -0,0 +1,160 @@
+#!/bin/sh
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "This utility needs to run as root"
+ exit 1
+fi
+
+# shellcheck source=src/nextcloud/utilities/nextcloud-utilities
+. "$SNAP/utilities/nextcloud-utilities"
+# shellcheck source=src/mysql/utilities/mysql-utilities
+. "$SNAP/utilities/mysql-utilities"
+# shellcheck source=src/common/utilities/common-utilities
+. "$SNAP/utilities/common-utilities"
+
+# shellcheck disable=SC2119
+wait_for_mysql
+
+COMMAND="nextcloud.export"
+BACKUP_DIRECTORY="${SNAP_COMMON}/backups"
+FORMAT="1"
+
+print_usage()
+{
+ echo "Usage:"
+ echo " $COMMAND [OPTIONS]"
+ echo " Export data suitable for migrating servers. By default this"
+ echo " includes the Nextcloud database, configuration, and data"
+ echo " (equivalent to running $COMMAND -abcd)."
+ echo ""
+ echo "Available options:"
+ echo " -h: Display this help message"
+ echo " -a: Include the (non-default) apps"
+ echo " -b: Include the database"
+ echo " -c: Include the config"
+ echo " -d: Include the data (can be quite large)"
+}
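+
+# For example, "fiduswriter.export -bc" exports only the database and the
+# config, skipping apps and data.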
+
+export_apps()
+{
+ backup="$1"
+ echo "Exporting apps..."
+ if ! rsync -ah --info=progress2 "$SNAP_DATA/nextcloud/extra-apps/" "${backup}/apps"; then
+ echo "Unable to export apps"
+ exit 1
+ fi
+}
+
+export_database()
+{
+ backup="$1"
+ echo "Exporting database..."
+ if ! run-mysqldump > "${backup}/database.sql"; then
+ echo "Unable to export database"
+ exit 1
+ fi
+}
+
+export_config()
+{
+ backup="$1"
+ config_backup="${backup}/configuration.py"
+
+ # Mask out the config password. We don't need it when restoring.
+ # (Assumes a Django-style settings entry of the form "PASSWORD": "...",
+ # on a single line.)
+ echo "Exporting config..."
+ if ! sed "s/\([\"']PASSWORD[\"']\s*:\s*\).*,/\1'DBPASSWORD',/" \
+ "${SNAP_DATA}/configuration.py" > "$config_backup"; then
+ echo "Unable to export config"
+ exit 1
+ fi
+}
+
+export_data()
+{
+ backup="$1"
+ echo "Exporting data..."
+ if ! rsync -ah --info=progress2 "${NEXTCLOUD_DATA_DIR%/}/" "${backup}/data"; then
+ echo "Unable to export data"
+ exit 1
+ fi
+}
+
+do_export_apps=false
+do_export_database=false
+do_export_config=false
+do_export_data=false
+
+# If no parameters are specified, default to exporting everything
+if [ $# -eq 0 ]; then
+ do_export_apps=true
+ do_export_database=true
+ do_export_config=true
+ do_export_data=true
+fi
+
+while getopts ":abcdh" opt; do
+ case $opt in
+ a)
+ do_export_apps=true
+ ;;
+ b)
+ do_export_database=true
+ ;;
+ c)
+ do_export_config=true
+ ;;
+ d)
+ do_export_data=true
+ ;;
+ h)
+ print_usage
+ exit 0
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ esac
+done
+
+# Clear options
+shift "$((OPTIND-1))"
+
+echo "WARNING: This functionality is still experimental and under" >&2
+echo "development, use at your own risk. Note that the CLI interface is" >&2
+echo "unstable, so beware if using from within scripts." >&2
+echo "" >&2
+
+backup="${BACKUP_DIRECTORY}/$(date +%Y%m%d-%H%M%S)"
+
+mkdir -p "$backup"
+chmod 750 "$backup"
+
+echo "$FORMAT" > "${backup}/format"
+
+# Enable maintenance mode so data can't change out from under us
+if ! enable_maintenance_mode; then
+ echo "Unable to enter maintenance mode"
+ exit 1
+fi
+trap 'disable_maintenance_mode' EXIT
+
+if [ "$do_export_apps" = true ]; then
+ export_apps "$backup"
+fi
+
+if [ "$do_export_database" = true ]; then
+ export_database "$backup"
+fi
+
+if [ "$do_export_config" = true ]; then
+ export_config "$backup"
+fi
+
+if [ "$do_export_data" = true ]; then
+ export_data "$backup"
+fi
+
+echo ""
+echo "Successfully exported $backup"
diff --git a/src/import-export/bin/import-data b/src/import-export/bin/import-data
new file mode 100755
index 0000000..3319479
--- /dev/null
+++ b/src/import-export/bin/import-data
@@ -0,0 +1,169 @@
+#!/bin/sh
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "This utility needs to run as root"
+ exit 1
+fi
+
+# shellcheck source=src/nextcloud/utilities/nextcloud-utilities
+. "$SNAP/utilities/nextcloud-utilities"
+# shellcheck source=src/mysql/utilities/mysql-utilities
+. "$SNAP/utilities/mysql-utilities"
+# shellcheck source=src/common/utilities/common-utilities
+. "$SNAP/utilities/common-utilities"
+
+# shellcheck disable=SC2119
+wait_for_mysql
+
+COMMAND="fiduswriter.import"
+
+print_usage()
+{
+ echo "Usage:"
+ echo " $COMMAND [OPTIONS] "
+ echo " Import data exported from another Nextcloud snap instance."
+ echo " By default this imports the database, config, and data"
+ echo " (equivalent to running $COMMAND -abcd)."
+ echo ""
+ echo "Available options:"
+ echo " -h: Display this help message"
+ echo " -a: Import the (non-default) apps"
+ echo " -b: Import the database"
+ echo " -c: Import the config"
+ echo " -d: Import the data"
+}
+
+import_apps()
+{
+ backup_dir="${1%/}"
+ apps_backup="${backup_dir}/apps"
+ run_command "Clearing existing non-default apps" rm -rf "$SNAP_DATA/nextcloud/extra-apps"
+ echo "Importing apps..."
+ if ! rsync -ah --info=progress2 "$apps_backup/" "$SNAP_DATA/nextcloud/extra-apps"; then
+ echo "Unable to import apps"
+ exit 1
+ fi
+}
+
+import_database()
+{
+ backup_dir="$1"
+ database_backup="${backup_dir}/database.sql"
+
+ # First, drop the database (if any)
+ run_command "Dropping existing database" run-mysql -e "DROP DATABASE nextcloud"
+ run_command "Creating new database" run-mysql -e "CREATE DATABASE nextcloud"
+ run_command "Granting database privileges to existing user" \
+ run-mysql -e "GRANT ALL PRIVILEGES ON nextcloud.* TO 'nextcloud'@'localhost'"
+
+ # Now restore the database
+ echo "Importing database..."
+ if ! run-mysql fiduswriter < "$database_backup"; then
+ echo "Unable to import database"
+ exit 1
+ fi
+}
+
+import_config()
+{
+ backup_dir="$1"
+ config_backup="${backup_dir}/configuration.py"
+ database_password="$(mysql_get_fiduswriter_password)"
+
+ # Import the config, but set our new database password
+ echo "Importing config..."
+ if ! sed "s/DBPASSWORD/$database_password/" \
+ "$config_backup" > "${SNAP_DATA}/configuration.py"; then
+ echo "Unable to import config"
+ exit 1
+ fi
+}
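+
+# For example, a line exported as
+#   "PASSWORD": 'DBPASSWORD',
+# is written back as
+#   "PASSWORD": 'the-new-mysql-password',
+# (password value hypothetical; see the masking note in export-data).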
+
+import_data()
+{
+ backup_dir="${1%/}"
+ data_backup="${backup_dir}/data"
+ run_command "Clearing existing data" rm -rf "$NEXTCLOUD_DATA_DIR"
+ echo "Importing data..."
+ if ! rsync -ah --info=progress2 "$data_backup/" "$NEXTCLOUD_DATA_DIR"; then
+ echo "Unable to import data"
+ exit 1
+ fi
+}
+
+do_import_apps=false
+do_import_database=false
+do_import_config=false
+do_import_data=false
+
+# If no option flags are specified (only the backup dir), default to
+# importing everything
+if [ $# -eq 1 ]; then
+ do_import_apps=true
+ do_import_database=true
+ do_import_config=true
+ do_import_data=true
+fi
+
+while getopts ":abcdh" opt; do
+ case $opt in
+ a)
+ do_import_apps=true
+ ;;
+ b)
+ do_import_database=true
+ ;;
+ c)
+ do_import_config=true
+ ;;
+ d)
+ do_import_data=true
+ ;;
+ h)
+ print_usage
+ exit 0
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ esac
+done
+
+# Clear options
+shift "$((OPTIND-1))"
+
+echo "WARNING: This functionality is still experimental and under" >&2
+echo "development, use at your own risk. Note that the CLI interface is" >&2
+echo "unstable, so beware if using from within scripts." >&2
+echo "" >&2
+
+# Enable maintenance mode so data can't change out from under us
+if nextcloud_is_installed && ! enable_maintenance_mode; then
+ echo "Unable to enter maintenance mode"
+ exit 1
+fi
+trap 'disable_maintenance_mode' EXIT
+
+backup_dir="$1"
+if [ -z "$backup_dir" ]; then
+ echo "Missing parameter "
+ print_usage
+ exit 1
+fi
+
+if [ "$do_import_apps" = true ]; then
+ import_apps "$backup_dir"
+fi
+
+if [ "$do_import_database" = true ]; then
+ import_database "$backup_dir"
+fi
+
+if [ "$do_import_config" = true ]; then
+ import_config "$backup_dir"
+fi
+
+if [ "$do_import_data" = true ]; then
+ import_data "$backup_dir"
+fi
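+
+# Typical migration flow (hostnames and paths hypothetical):
+#   old$ sudo fiduswriter.export
+#   old$ scp -r /var/snap/fiduswriter/common/backups/20200101-120000 new:/tmp/
+#   new$ sudo fiduswriter.import /tmp/20200101-120000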
diff --git a/src/logrotate/bin/run-logrotate b/src/logrotate/bin/run-logrotate
new file mode 100755
index 0000000..14d6bbe
--- /dev/null
+++ b/src/logrotate/bin/run-logrotate
@@ -0,0 +1,28 @@
+#!/bin/sh -e
+
+# shellcheck source=src/logrotate/utilities/logrotate-utilities
+. "$SNAP/utilities/logrotate-utilities"
+
+# Clean non-existent log file entries from the status file
+test -e "$LOGROTATE_STATUS_FILE" || touch "$LOGROTATE_STATUS_FILE"
+head -1 "$LOGROTATE_STATUS_FILE" > "${LOGROTATE_STATUS_FILE}.clean"
+sed '1d; s/"//g' "$LOGROTATE_STATUS_FILE" | while read -r logfile date
+do
+ [ -e "$logfile" ] && echo "\"$logfile\" $date"
+done >> "${LOGROTATE_STATUS_FILE}.clean"
+mv "${LOGROTATE_STATUS_FILE}.clean" "$LOGROTATE_STATUS_FILE"
+
+# logrotate doesn't support environment variables in its configuration file,
+# so we write a converted version to disk and use that (and of course clean
+# it up afterward)
+configuration_file="$(mktemp)"
+trap 'rm -f "$configuration_file"' EXIT
+
+envsubst < "$SNAP/config/logrotate/logrotate.conf" > "$configuration_file"
+
+# If logrotate fails, it could be due to corruption in the status file. Try
+# removing it so we start with a clean slate next time around.
+if ! logrotate --verbose --state "$LOGROTATE_STATUS_FILE" "$configuration_file"; then
+ rm -f "$LOGROTATE_STATUS_FILE"
+ exit 1
+fi
diff --git a/src/logrotate/config/logrotate.conf b/src/logrotate/config/logrotate.conf
new file mode 100644
index 0000000..1f81536
--- /dev/null
+++ b/src/logrotate/config/logrotate.conf
@@ -0,0 +1,37 @@
+# Rotate log files every week
+weekly
+
+# Keep 4 weeks worth of logs
+rotate 4
+
+# Create new (empty) log files after rotating old ones
+create 640 root root
+
+# It's okay if the log file is missing
+missingok
+
+# Don't rotate log files that are empty
+notifempty
+
+# Compress rotated log files, but wait until the next rotation cycle to give
+# clients time to finish writing.
+compress
+delaycompress
+
+# Apache logs
+$SNAP_DATA_CURRENT/logs/apache_errors.log $SNAP_DATA_CURRENT/logs/apache_access.log {
+ postrotate
+ snapctl restart --reload $SNAP_INSTANCE_NAME.apache
+ endscript
+}
+
+# Redis logs. Note that redis reopens the log for every message, so it doesn't
+# require a postrotate
+$SNAP_DATA_CURRENT/logs/redis.log {}
+
+# MySQL logs
+$SNAP_DATA_CURRENT/logs/mysql_errors.log {
+ postrotate
+ snapctl restart --reload $SNAP_INSTANCE_NAME.mysql
+ endscript
+}
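+
+# At runtime, run-logrotate expands the $SNAP_* variables above with envsubst.
+# $SNAP_DATA_CURRENT presumably resolves to the snap's unversioned data path
+# (e.g. /var/snap/fiduswriter/current), keeping these log paths stable across
+# snap revisions.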
diff --git a/src/logrotate/utilities/logrotate-utilities b/src/logrotate/utilities/logrotate-utilities
new file mode 100755
index 0000000..36238f4
--- /dev/null
+++ b/src/logrotate/utilities/logrotate-utilities
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+# shellcheck source=src/common/utilities/common-utilities
+. "$SNAP/utilities/common-utilities"
+
+export LOGROTATE_STATUS_FILE="$SNAP_DATA/logrotate/status"
+
+mkdir -p "$(dirname "$LOGROTATE_STATUS_FILE")"
+chmod 750 "$(dirname "$LOGROTATE_STATUS_FILE")"
\ No newline at end of file
diff --git a/src/migrations/bin/run-snap-migrations b/src/migrations/bin/run-snap-migrations
new file mode 100755
index 0000000..b289d63
--- /dev/null
+++ b/src/migrations/bin/run-snap-migrations
@@ -0,0 +1,16 @@
+#!/bin/sh -e
+
+# shellcheck source=src/common/utilities/common-utilities
+. "$SNAP/utilities/common-utilities"
+
+previous_version="$(get_previous_snap_version)"
+
+# Now run the version-specific migrations
+migrations_directory="$SNAP/migrations"
+version_migrations="$(find "$migrations_directory" -maxdepth 1 -mindepth 1 -type d | sort -V)"
+for directory in $version_migrations; do
+ version="$(basename "$directory")"
+ if version_less_than "$previous_version" "$version"; then
+ run-parts -v --exit-on-error --regex '.*\.sh$' "$directory"
+ fi
+done
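+
+# For example, given a migration script migrations/3.10.9/01-fix.sh (name
+# hypothetical), an upgrade from snap version 3.10.8 runs it once, while an
+# upgrade from 3.10.9 or later skips it.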
diff --git a/src/migrations/migrations/README.txt b/src/migrations/migrations/README.txt
new file mode 100644
index 0000000..84e9403
--- /dev/null
+++ b/src/migrations/migrations/README.txt
@@ -0,0 +1,4 @@
+Add migrations here. Create a folder for each version and add migration
+scripts inside it. Note that run-snap-migrations only executes files whose
+names end in .sh, e.g.:
+
+3.10.9/1_upgrade.sh
diff --git a/src/mysql/bin/reload-mysql b/src/mysql/bin/reload-mysql
new file mode 100755
index 0000000..c9922e0
--- /dev/null
+++ b/src/mysql/bin/reload-mysql
@@ -0,0 +1,6 @@
+#!/bin/sh -e
+
+# shellcheck source=src/mysql/utilities/mysql-utilities
+. "$SNAP/utilities/mysql-utilities"
+
+mysql_flush_logs
\ No newline at end of file
diff --git a/src/mysql/bin/run-mysql b/src/mysql/bin/run-mysql
index ccd4dd1..1072fe7 100755
--- a/src/mysql/bin/run-mysql
+++ b/src/mysql/bin/run-mysql
@@ -1,3 +1,6 @@
-#!/bin/sh
+#!/bin/sh -e
-mysql --defaults-file="$SNAP_DATA/mysql/root.ini" "$@"
+# shellcheck source=src/mysql/utilities/mysql-utilities
+. "$SNAP/utilities/mysql-utilities"
+
+mysql --defaults-file="$MYSQL_ROOT_OPTION_FILE" "$@"
diff --git a/src/mysql/bin/run-mysqldump b/src/mysql/bin/run-mysqldump
new file mode 100755
index 0000000..6870a88
--- /dev/null
+++ b/src/mysql/bin/run-mysqldump
@@ -0,0 +1,6 @@
+#!/bin/sh -e
+
+# shellcheck source=src/mysql/utilities/mysql-utilities
+. "$SNAP/utilities/mysql-utilities"
+
+mysqldump --defaults-file="$MYSQL_ROOT_OPTION_FILE" --lock-tables fiduswriter "$@"
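+
+# Example (from within the snap's environment):
+#   run-mysqldump > /tmp/fiduswriter-backup.sql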
diff --git a/src/mysql/bin/start_mysql b/src/mysql/bin/start_mysql
index 24c4d45..aea3219 100755
--- a/src/mysql/bin/start_mysql
+++ b/src/mysql/bin/start_mysql
@@ -1,14 +1,20 @@
-#!/bin/sh
+#!/bin/sh -e
# shellcheck source=src/mysql/utilities/mysql-utilities
. "$SNAP/utilities/mysql-utilities"
-root_option_file="$SNAP_DATA/mysql/root.ini"
+mkdir -p "${SNAP_DATA}/logs"
+chmod 750 "${SNAP_DATA}/logs"
+
new_install=false
+if ! mysql_is_initialized; then
+ rm -rf "$SNAP_DATA/mysql"
+fi
+
# Make sure the database is initialized (this is safe to run if already
# initialized)
-if mysqld --initialize-insecure --basedir="$SNAP" --datadir="$SNAP_DATA/mysql" --lc-messages-dir="$SNAP/share"; then
+if mysqld --defaults-file="$SNAP/my.cnf" --initialize-insecure --basedir="$SNAP" --datadir="$SNAP_DATA/mysql" --lc-messages-dir="$SNAP/share" > /dev/null 2>&1; then
new_install=true
fi
@@ -30,12 +36,15 @@ if [ $new_install = true ]; then
printf "done\n"
# Save root user information
- cat <<-EOF > "$root_option_file"
+ cat <<-EOF > "$MYSQL_ROOT_OPTION_FILE"
[client]
socket=$MYSQL_SOCKET
user=root
EOF
- chmod 600 "$root_option_file"
+ chmod 600 "$MYSQL_ROOT_OPTION_FILE"
+
+ # Make sure we wait until MySQL is actually up before continuing
+ wait_for_mysql -f
# Now set everything up in one step:
# 1) Set the root user's password
@@ -43,43 +52,33 @@ if [ $new_install = true ]; then
# 3) Create the fiduswriter database
# 4) Grant the fiduswriter user privileges on the fiduswriter database
printf "Setting up users and fiduswriter database... "
- if mysql --defaults-file="$root_option_file" <<-SQL
+ if run-mysql <<-SQL
ALTER USER 'root'@'localhost' IDENTIFIED BY '$root_password';
CREATE USER 'fiduswriter'@'localhost' IDENTIFIED BY '$fiduswriter_password';
CREATE DATABASE fiduswriter;
- GRANT ALL PRIVILEGES ON fiduswriter.* TO 'fiduswriter'@'localhost' IDENTIFIED BY '$fiduswriter_password';
+ GRANT ALL PRIVILEGES ON fiduswriter.* TO 'fiduswriter'@'localhost';
SQL
then
# Now the root mysql user has a password. Save that as well.
- echo "password=$root_password" >> "$root_option_file"
+ echo "password=$root_password" >> "$MYSQL_ROOT_OPTION_FILE"
printf "done\n"
else
- echo "Failed to initialize-- reverting..."
+ echo "Failed to initialize-- undoing setup and will try again..."
"$SNAP/support-files/mysql.server" stop
- rm -rf "$SNAP_DATA"/mysql/*
+ rm -rf "$SNAP_DATA/mysql"
+ exit 1
fi
-
else
# Okay, this isn't a new installation. However, we recently changed
# the location of MySQL's socket (11.0.2snap1). Make sure the root
# option file is updated to look there instead of the old location.
- sed -ri "s|(socket\s*=\s*)/var/snap/.*mysql.sock|\1$MYSQL_SOCKET|" "$root_option_file"
+ sed -ri "s|(socket\s*=\s*)/var/snap/.*mysql.sock|\1$MYSQL_SOCKET|" "$MYSQL_ROOT_OPTION_FILE"
fi
-# Wait here until mysql is running
+# Wait here until mysql is running. MySQL 8 runs the table upgrade
+# automatically, so we no longer need to do it ourselves.
wait_for_mysql -f
-# Check and upgrade mysql tables if necessary. This will return 0 if the upgrade
-# succeeded, in which case we need to restart mysql.
-echo "Checking/upgrading mysql tables if necessary..."
-if mysql_upgrade --defaults-file="$root_option_file"; then
- echo "Restarting mysql server after upgrade..."
- "$SNAP/support-files/mysql.server" restart
-
- # Wait for server to come back after upgrade
- wait_for_mysql -f
-fi
-
# If this was a new installation, wait until the server is all up and running
# before saving off the fiduswriter user's password. This way the presence of the
# file can be used as a signal that mysql is ready to be used.
diff --git a/src/mysql/my.cnf b/src/mysql/my.cnf
index 4971e69..07b563b 100644
--- a/src/mysql/my.cnf
+++ b/src/mysql/my.cnf
@@ -3,6 +3,6 @@ user=root
max_allowed_packet=100M
secure-file-priv=NULL
skip-networking
-
-[mysqld_safe]
-log_error=error.log
+skip-log-bin
+transaction_isolation=READ-COMMITTED
+log_error=../logs/mysql_errors.log
diff --git a/src/mysql/support-files/mysql.server b/src/mysql/support-files/mysql.server
index d6fb637..10f60e9 100755
--- a/src/mysql/support-files/mysql.server
+++ b/src/mysql/support-files/mysql.server
@@ -115,7 +115,7 @@ other_args="$*" # uncommon, but needed when called from an RPM upgrade action
# Upstream mysql stuff, no need to fix this
# shellcheck disable=SC2116,SC2039
case "$(echo "testing\c")","$(echo -n testing)" in
- *c*,-n*) echo_n="" echo_c="" ;;
+ *c*,-n*) echo_n="" echo_c="" ;;
*c*,*) echo_n=-n echo_c="" ;;
*) echo_n="" echo_c='\c' ;;
esac
@@ -202,7 +202,7 @@ case "$mode" in
then
# Give extra arguments to mysqld with the my.cnf file. This script
# may be overwritten at next upgrade.
- "$bindir/mysqld_safe" --datadir="$datadir" --pid-file="$mysqld_pid_file_path" --lc-messages-dir="$SNAP/share" --socket="$MYSQL_SOCKET" "$other_args" >/dev/null 2>&1 &
+ "$bindir/mysqld_safe" --defaults-file="$SNAP/my.cnf" --datadir="$datadir" --pid-file="$mysqld_pid_file_path" --lc-messages-dir="$SNAP/share" --socket="$MYSQL_SOCKET" "$other_args" >/dev/null &
wait_for_pid created "$!" "$mysqld_pid_file_path"; return_value=$?
# Make lock for RedHat / SuSE
@@ -253,7 +253,7 @@ case "$mode" in
'restart')
# Stop the service and regardless of whether it was
# running or not, start it again.
- if $0 stop "$other_args"; then
+ if $0 stop "$other_args"; then
$0 start "$other_args"
else
log_failure_msg "Failed to stop running server, so refusing to try to start."
diff --git a/src/mysql/utilities/mysql-utilities b/src/mysql/utilities/mysql-utilities
index 0b72126..126dd0b 100755
--- a/src/mysql/utilities/mysql-utilities
+++ b/src/mysql/utilities/mysql-utilities
@@ -3,6 +3,7 @@
# shellcheck source=src/common/utilities/common-utilities
. "$SNAP/utilities/common-utilities"
+export MYSQL_ROOT_OPTION_FILE="$SNAP_DATA/mysql/root.ini"
export MYSQL_PIDFILE="/tmp/pids/mysql.pid"
export MYSQL_SOCKET="/tmp/sockets/mysql.sock"
export FIDUSWRITER_PASSWORD_FILE="$SNAP_DATA/mysql/fiduswriter_password"
@@ -19,7 +20,15 @@ mysql_is_running()
{
# Arguments:
# -f: Force the check, i.e. ignore if it's currently in setup
- [ -f "$MYSQL_PIDFILE" ] && [ -S "$MYSQL_SOCKET" ] && (! mysql_setup_running || [ "$1" = "-f" ])
+ [ -f "$MYSQL_PIDFILE" ] && \
+ [ -S "$MYSQL_SOCKET" ] && \
+ run-mysql -e 'SHOW DATABASES' > /dev/null 2>&1 && \
+ (! mysql_setup_running || [ "$1" = "-f" ])
+}
+
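+# The "password=" line is only appended to the root option file at the very
+# end of initial setup (see start_mysql), so its presence doubles as a
+# "setup completed" marker.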
+mysql_is_initialized()
+{
+ [ -f "$MYSQL_ROOT_OPTION_FILE" ] && grep -q "password=" "$MYSQL_ROOT_OPTION_FILE"
}
wait_for_mysql()
@@ -54,6 +63,13 @@ mysql_pid()
fi
}
+mysql_flush_logs()
+{
+ if mysql_is_running ""; then
+ run-mysql -e 'FLUSH LOGS'
+ fi
+}
+
mysql_set_fiduswriter_password()
{
echo "$1" > "$FIDUSWRITER_PASSWORD_FILE"
diff --git a/src/patches/certbot-remove-default-config-files.patch b/src/patches/certbot-remove-default-config-files.patch
new file mode 100644
index 0000000..e129406
--- /dev/null
+++ b/src/patches/certbot-remove-default-config-files.patch
@@ -0,0 +1,16 @@
+--- a/constants.py 2019-04-25 13:54:51.740610037 -0700
++++ b/constants.py 2019-04-25 13:55:14.996430304 -0700
+@@ -15,12 +15,7 @@
+ """Plugins Setuptools entry point before rename."""
+
+ CLI_DEFAULTS = dict(
+- config_files=[
+- os.path.join(misc.get_default_folder('config'), 'cli.ini'),
+- # http://freedesktop.org/wiki/Software/xdg-user-dirs/
+- os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"),
+- "letsencrypt", "cli.ini"),
+- ],
++ config_files=[],
+
+ # Main parser
+ verbose_count=-int(logging.INFO / 10),
diff --git a/src/patches/certbot-remove-storage-chown.patch b/src/patches/certbot-remove-storage-chown.patch
new file mode 100644
index 0000000..a813801
--- /dev/null
+++ b/src/patches/certbot-remove-storage-chown.patch
@@ -0,0 +1,11 @@
+--- a/storage.py 2019-05-18 10:01:00.196684342 +0000
++++ b/storage.py 2019-05-18 10:01:04.352539257 +0000
+@@ -1108,8 +1108,7 @@
+ (stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | \
+ stat.S_IROTH)
+ mode = BASE_PRIVKEY_MODE | old_mode
+- os.chown(target["privkey"], -1, os.stat(old_privkey).st_gid)
+ os.chmod(target["privkey"], mode)
+
+ # Save everything else
+ with open(target["cert"], "wb") as f:
diff --git a/src/mysql-patches/mysql-support-compile-time-disabling-of-setpriority.patch b/src/patches/mysql-support-compile-time-disabling-of-setpriority.patch
similarity index 100%
rename from src/mysql-patches/mysql-support-compile-time-disabling-of-setpriority.patch
rename to src/patches/mysql-support-compile-time-disabling-of-setpriority.patch
diff --git a/src/redis/bin/start-redis-server b/src/redis/bin/start-redis-server
new file mode 100755
index 0000000..efb1963
--- /dev/null
+++ b/src/redis/bin/start-redis-server
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# shellcheck source=src/redis/utilities/redis-utilities
+. "$SNAP/utilities/redis-utilities"
+
+mkdir -p "${SNAP_DATA}/logs"
+chmod 750 "${SNAP_DATA}/logs"
+
+# Redis doesn't support environment variables in its config file. Thankfully,
+# it can read its config from stdin, so we rewrite the config file on the fly
+# and pipe it in.
+envsubst < "$SNAP/config/redis/redis.conf" | redis-server -
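+
+# For example, the template line "logfile ${SNAP_DATA}/logs/redis.log" reaches
+# redis-server as "logfile /var/snap/fiduswriter/<revision>/logs/redis.log".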
diff --git a/src/redis/config/redis.conf b/src/redis/config/redis.conf
new file mode 100644
index 0000000..46a0c3a
--- /dev/null
+++ b/src/redis/config/redis.conf
@@ -0,0 +1,1023 @@
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all the network interfaces available on the server.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
+# bind 127.0.0.1 ::1
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only into
+# the IPv4 loopback interface address (this means Redis will be able to
+# accept connections only from clients running into the same computer it
+# is running).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+bind 127.0.0.1
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+# "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode yes
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 0
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need an high backlog in order
+# to avoid slow clients connections issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+unixsocket ${REDIS_SOCKET}
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Take the connection alive from the point of view of network
+# equipment in the middle.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+daemonize no
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+# supervised no - no supervision interaction
+# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+# supervised auto - detect upstart or systemd method based on
+# UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+# They do not enable continuous liveness pings back to your supervisor.
+supervised no
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+pidfile ${REDIS_PIDFILE}
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ${SNAP_DATA}/logs/redis.log
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+################################ SNAPSHOTTING ################################
+#
+# Save the DB on disk:
+#
+# save <seconds> <changes>
+#
+# Will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# In the example below the behaviour will be to save:
+# after 900 sec (15 min) if at least 1 key changed
+# after 300 sec (5 min) if at least 10 keys changed
+# after 60 sec if at least 10000 keys changed
+#
+# Note: you can disable saving completely by commenting out all "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""
+
+save 900 1
+save 300 10
+save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dump .rdb databases?
+# For default that's set to 'yes' as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir ${SNAP_DATA}/redis/
+
+################################# REPLICATION #################################
+
+# Master-Slave replication. Use slaveof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+# stop accepting writes if it appears to be not connected with at least
+# a given number of slaves.
+# 2) Redis slaves are able to perform a partial resynchronization with the
+# master if the replication link is lost for a relatively small amount of
+# time. You may want to configure the replication backlog size (see the next
+# sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+# network partition slaves automatically try to reconnect to masters
+# and resynchronize with them.
+#
+# slaveof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the slave to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the slave request.
+#
+# masterauth <master-password>
+
+# When a slave loses its connection with the master, or when the replication
+# is still in progress, the slave can act in two different ways:
+#
+# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+# an error "SYNC with master in progress" to all the kind of commands
+# but to INFO and SLAVEOF.
+#
+slave-serve-stale-data yes
+
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# -------------------------------------------------------
+# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
+# -------------------------------------------------------
+#
+# New slaves and reconnecting slaves that are not able to continue the replication
+# process just receiving differences, need to do what is called a "full
+# synchronization". An RDB file is transmitted from the master to the slaves.
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+# file on disk. Later the file is transferred by the parent
+# process to the slaves incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+# RDB file to slave sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more slaves
+# can be queued and served with the RDB file as soon as the current child producing
+# the RDB file finishes its work. With diskless replication instead once
+# the transfer starts, new slaves arriving will be queued and a new transfer
+# will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple slaves
+# will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the slaves.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new slaves arriving, that will be queued for the next RDB transfer, so the server
+# waits a delay in order to let more slaves arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# Slaves send PINGs to server in a predefined interval. It's possible to change
+# this interval with the repl_ping_slave_period option. The default value is 10
+# seconds.
+#
+# repl-ping-slave-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
+# 2) Master timeout from the point of view of slaves (data, pings).
+# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-slave-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the slave.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the slave socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to slaves. But this can add a delay for
+# the data to appear on the slave side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the slave side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and slaves are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# slave data when slaves are disconnected for some time, so that when a slave
+# wants to reconnect again, often a full resync is not needed, but a partial
+# resync is enough, just passing the portion of data the slave missed while
+# disconnected.
+#
+# The bigger the replication backlog, the longer the time the slave can be
+# disconnected and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated once there is at least a slave connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no longer connected slaves for some time, the backlog
+# will be freed. The following option configures the amount of seconds that
+# need to elapse, starting from the time the last slave disconnected, for
+# the backlog buffer to be freed.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The slave priority is an integer number published by Redis in the INFO output.
+# It is used by Redis Sentinel in order to select a slave to promote into a
+# master if the master is no longer working correctly.
+#
+# A slave with a low priority number is considered better for promotion, so
+# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
+# pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the slave as not able to perform the
+# role of master, so a slave with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+slave-priority 100
+
+# It is possible for a master to stop accepting writes if there are less than
+# N slaves connected, having a lag less or equal than M seconds.
+#
+# The N slaves need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the slave, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough slaves
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 slaves with a lag <= 10 seconds use:
+#
+# min-slaves-to-write 3
+# min-slaves-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-slaves-to-write is set to 0 (feature disabled) and
+# min-slaves-max-lag is set to 10.
+
+################################## SECURITY ###################################
+
+# Require clients to issue AUTH before processing any other
+# commands. This might be useful in environments in which you do not trust
+# others with access to the host running redis-server.
+#
+# This should stay commented out for backward compatibility and because most
+# people do not need auth (e.g. they run their own servers).
+#
+# Warning: since Redis is pretty fast an outside user can try up to
+# 150k passwords per second against a good box. This means that you should
+# use a very strong password otherwise it will be very easy to break.
+#
+# requirepass foobared
+
+# Command renaming.
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to slaves may cause problems.
+
+################################### LIMITS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# maxclients 10000
+
+# Don't use more memory than the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU cache, or to set
+# a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have slaves attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the slaves are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of slaves is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have slaves attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for slave
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select among five behaviors:
+#
+# volatile-lru -> remove the key with an expire set using an LRU algorithm
+# allkeys-lru -> remove any key according to the LRU algorithm
+# volatile-random -> remove a random key with an expire set
+# allkeys-random -> remove a random key, any key
+# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+# noeviction -> don't expire at all, just return an error on write operations
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations, when there are no suitable keys for eviction.
+#
+# At the date of writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy noeviction
+
+# LRU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune it for speed or
+# accuracy. For default Redis will check five keys and pick the one that was
+# used less recently, you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 Approximates very closely
+# true LRU but costs a bit more CPU. 3 is very fast but not very accurate.
+#
+# maxmemory-samples 5
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OS will really flush
+# data on disk, some other OS will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# More details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger than the specified percentage, the rewrite is triggered. Also
+# you need to specify a minimal size for the AOF file to be rewritten, this
+# is useful to avoid rewriting the AOF file even if the percentage increase
+# is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user requires
+# to fix the AOF file using the "redis-check-aof" utility before to restart
+# the server.
+#
+# Note that if the AOF file will be found to be corrupted in the middle
+# the server will still exit with an error. This option only applies when
+# Redis will try to read more data from the AOF file but not enough bytes
+# will be found.
+aof-load-truncated yes
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet called write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+#
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
+# in order to mark it as "mature" we need to wait for a non trivial percentage
+# of users to deploy it in production.
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+#
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node enable the cluster support uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the amount of milliseconds a node must be unreachable
+# for it to be considered in failure state.
+# Most other internal time limits are multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A slave of a failing master will avoid to start a failover if its data
+# looks too old.
+#
+# There is no simple way for a slave to actually have a exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple slaves able to failover, they exchange messages
+# in order to try to give an advantage to the slave with the best
+# replication offset (more data from the master processed).
+# Slaves will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single slave computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the slave will not try to failover
+# at all.
+#
+# The point "2" can be tuned by user. Specifically a slave will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+# (node-timeout * slave-validity-factor) + repl-ping-slave-period
+#
+# So for example if node-timeout is 30 seconds, and the slave-validity-factor
+# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
+# slave will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large slave-validity-factor may allow slaves with too old data to failover
+# a master, while a too small value may prevent the cluster from being able to
+# elect a slave at all.
+#
+# For maximum availability, it is possible to set the slave-validity-factor
+# to a value of 0, which means, that slaves will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-slave-validity-factor 10
+
+# Cluster slaves are able to migrate to orphaned masters, that are masters
+# that are left without working slaves. This improves the cluster ability
+# to resist to failures as otherwise an orphaned master can't be failed over
+# in case of failure if it has no working slaves.
+#
+# Slaves migrate to orphaned masters only if there are still at least a
+# given number of other working slaves for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a slave
+# will migrate only if there is at least 1 other working slave for its master
+# and so forth. It usually reflects the number of slaves you want for every
+# master in your cluster.
+#
+# Default is 1 (slaves migrate only if their masters remain with at least
+# one slave). To disable migration just set it to a very large value.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least an hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# are no longer covered) all the cluster becomes, eventually, unavailable.
+# It automatically returns available as soon as all the slots are covered again.
+#
+# However sometimes you want the subset of the cluster which is working,
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# In order to setup your cluster make sure to read the documentation
+# available at http://redis.io web site.
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# the execution time, in microseconds, that a command must exceed in order
+# to get logged, and the other sets the length of the slow log. When a new
+# command is logged the oldest one is removed from the queue of logged
+# commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
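+
+# A minimal sketch of inspecting the slow log at runtime with redis-cli
+# (commands shown here for illustration):
+#
+#   redis-cli SLOWLOG GET 10    # fetch up to the 10 most recent entries
+#   redis-cli SLOWLOG LEN       # number of entries currently in the log
+#   redis-cli SLOWLOG RESET     # discard all entries, reclaiming memory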
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the number of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact that, while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
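+
+# For example, to enable monitoring of events taking 100 ms or more at
+# runtime and then query the collected data (illustrative redis-cli calls):
+#
+#   redis-cli CONFIG SET latency-monitor-threshold 100
+#   redis-cli LATENCY LATEST    # latest latency spike observed per event
+#   redis-cli LATENCY DOCTOR    # human-readable analysis and advice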
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance, if keyspace event notification is enabled, and a client
+# performs a DEL operation on the key "foo" stored in database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# A Alias for "g$lshzxe", so that the "AKE" string means all the events.
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of expired keys by subscribing to the channel
+# name __keyevent@0__:expired, use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
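+#
+# Whether a given key currently uses the compact encoding can be checked
+# with OBJECT ENCODING, e.g. (hypothetical key name "myhash"):
+#
+#   redis-cli OBJECT ENCODING myhash   # "ziplist" below the thresholds,
+#                                      # "hashtable" above them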
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb <-- not recommended for normal workloads
+# -4: max size: 32 Kb <-- not recommended
+# -3: max size: 16 Kb <-- probably not recommended
+# -2: max size: 8 Kb <-- good
+# -1: max size: 4 Kb <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+# going from either the head or tail"
+# So: [head]->node->node->...->node->[tail]
+# [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+# 2 here means: don't compress head or head->next or tail->prev or tail,
+# but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit on the size of the
+# set in order to use this special memory-saving encoding.
+set-max-intset-entries 512
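+#
+# (Illustrative check: "redis-cli OBJECT ENCODING myset" reports "intset"
+# for sets stored this way, and "hashtable" once this limit is exceeded or
+# a non-integer member is added.)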
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16-byte header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
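+
+# For instance (hypothetical key and members):
+#
+#   redis-cli PFADD visitors user1 user2 user3
+#
+# keeps "visitors" in the sparse encoding until its size crosses the limit
+# above, after which it is converted once to the fixed-size dense form.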
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehash the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never completed and some more memory is
+# used by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# slave -> slave clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance, if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if it reaches 16 megabytes and continuously stays over that
+# limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
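+
+# These limits can also be adjusted at runtime, e.g. (illustrative values):
+#
+#   redis-cli CONFIG SET client-output-buffer-limit "pubsub 64mb 16mb 90"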
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of timed-out clients, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
diff --git a/src/redis/utilities/redis-utilities b/src/redis/utilities/redis-utilities
new file mode 100755
index 0000000..2540dfa
--- /dev/null
+++ b/src/redis/utilities/redis-utilities
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+# shellcheck source=src/common/utilities/common-utilities
+. "$SNAP/utilities/common-utilities"
+
+export REDIS_PIDFILE="/tmp/pids/redis.pid"
+export REDIS_SOCKET="/tmp/sockets/redis.sock"
+
+mkdir -p "$(dirname "$REDIS_PIDFILE")"
+mkdir -p "$(dirname "$REDIS_SOCKET")"
+mkdir -p "$SNAP_DATA/redis"
+chmod 750 "$(dirname "$REDIS_PIDFILE")"
+chmod 750 "$(dirname "$REDIS_SOCKET")"
+chmod 750 "$SNAP_DATA/redis"
+
+redis_is_running()
+{
+ [ -f "$REDIS_PIDFILE" ] && [ -S "$REDIS_SOCKET" ]
+}
+
+wait_for_redis()
+{
+ wait_for_command "Waiting for redis" redis_is_running
+}
+
+redis_pid()
+{
+ if redis_is_running; then
+ cat "$REDIS_PIDFILE"
+ else
+ echo "Unable to get redis PID as it's not yet running" >&2
+ echo ""
+ fi
+}
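+
+# A minimal usage sketch (assuming this file is installed alongside
+# common-utilities as "$SNAP/utilities/redis-utilities"):
+#
+#   . "$SNAP/utilities/redis-utilities"
+#   wait_for_redis
+#   echo "redis is running with PID $(redis_pid)"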