From 52b8cfb85b9b9cd14b3f9d89e02abba1042bf025 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 19 Oct 2021 10:58:24 -0400 Subject: [PATCH 01/23] Task servers at , may remove. --- ansible/roles/celery_task/tasks/main.yml | 6 +++--- terraform/digitalocean.tf | 4 ++-- terraform/variables.tf | 4 ++++ 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/ansible/roles/celery_task/tasks/main.yml b/ansible/roles/celery_task/tasks/main.yml index c2864125d..bfe026aa3 100644 --- a/ansible/roles/celery_task/tasks/main.yml +++ b/ansible/roles/celery_task/tasks/main.yml @@ -34,9 +34,9 @@ labels: autoheal: "true" networks_cli_compatible: yes - network_mode: default - networks: - - name: newsblurnet + network_mode: host + # networks: + # - name: newsblurnet command: "{{ item.command }}" log_driver: json-file log_options: diff --git a/terraform/digitalocean.tf b/terraform/digitalocean.tf index 6c6dc067c..bc0290269 100644 --- a/terraform/digitalocean.tf +++ b/terraform/digitalocean.tf @@ -519,7 +519,7 @@ resource "digitalocean_droplet" "task-celery" { image = var.droplet_os name = format("task-celery%02v", count.index+1) region = var.droplet_region - size = var.droplet_size + size = var.droplet_size_10 ssh_keys = [digitalocean_ssh_key.default.fingerprint] provisioner "local-exec" { # command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120" @@ -538,7 +538,7 @@ resource "digitalocean_droplet" "task-work" { image = var.droplet_os name = "task-work${count.index+1}" region = var.droplet_region - size = var.droplet_size + size = var.droplet_size_10 ssh_keys = [digitalocean_ssh_key.default.fingerprint] provisioner "local-exec" { command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120" diff --git a/terraform/variables.tf b/terraform/variables.tf index 8ca419205..7771aa619 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -9,6 +9,10 @@ variable "droplet_size" { type = string default = "s-1vcpu-1gb" } +variable "droplet_size_10" { + type = string + default = "s-1vcpu-2gb" +} variable "droplet_size_20" { type = string From e86967bfbae5ae43a354ce8220305050830cb194 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 19 Oct 2021 11:08:46 -0400 Subject: [PATCH 02/23] Don't check consul vs haproxy. Let consul be the exception and the default be a full test.
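With this change, only consul keeps its lightweight probe behind an explicit query string; every other caller, haproxy included, falls through to the full replica-set test by default. A minimal sketch of the resulting handler shape (a condensed, hypothetical view, not the literal file: the "consul" parameter name, the db-mongo host, and the "newsblur" database name are assumptions here):

    from flask import Flask, request, abort
    import pymongo

    app = Flask(__name__)
    client = pymongo.MongoClient("mongodb://db-mongo:27017", serverSelectionTimeoutMS=2000)
    db = client.newsblur

    @app.route("/db_check/mongo")
    def db_check_mongo():
        # Exception: consul only needs to know the service is alive.
        if request.args.get("consul") == "1":
            return str(1)
        # Default (haproxy and everyone else): run the full check.
        try:
            stories = db.stories.estimated_document_count()
        except (pymongo.errors.NotMasterError, pymongo.errors.ServerSelectionTimeoutError):
            abort(504)
        if not stories:
            abort(510)
        return str(stories)

The real handler goes on to inspect replSetGetStatus and compare primary/secondary optimes, as the diff below shows; the sketch only illustrates the exception-vs-default branching.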
--- flask_monitor/db_monitor.py | 58 ++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/flask_monitor/db_monitor.py b/flask_monitor/db_monitor.py index 2d70054e1..e34fed587 100644 --- a/flask_monitor/db_monitor.py +++ b/flask_monitor/db_monitor.py @@ -84,39 +84,39 @@ def db_check_mongo(): return str(1) except: abort(503) - if request.args.get('haproxy') == '1': - try: - stories = db.stories.estimated_document_count() - except (pymongo.errors.NotMasterError, pymongo.errors.ServerSelectionTimeoutError): - abort(504) - except pymongo.errors.OperationFailure as e: - if 'Authentication failed' in str(e): - abort(505) - abort(506) - - if not stories: - abort(510) + + try: + stories = db.stories.estimated_document_count() + except (pymongo.errors.NotMasterError, pymongo.errors.ServerSelectionTimeoutError): + abort(504) + except pymongo.errors.OperationFailure as e: + if 'Authentication failed' in str(e): + abort(505) + abort(506) - status = client.admin.command('replSetGetStatus') - members = status['members'] - primary_optime = None - oldest_secondary_optime = None - for member in members: - member_state = member['state'] - optime = member['optime'] - if member_state == PRIMARY_STATE: - primary_optime = optime['ts'].time - elif member_state == SECONDARY_STATE: - if not oldest_secondary_optime or optime['ts'].time < oldest_secondary_optime: - oldest_secondary_optime = optime['ts'].time + if not stories: + abort(510) + + status = client.admin.command('replSetGetStatus') + members = status['members'] + primary_optime = None + oldest_secondary_optime = None + for member in members: + member_state = member['state'] + optime = member['optime'] + if member_state == PRIMARY_STATE: + primary_optime = optime['ts'].time + elif member_state == SECONDARY_STATE: + if not oldest_secondary_optime or optime['ts'].time < oldest_secondary_optime: + oldest_secondary_optime = optime['ts'].time - if not primary_optime or not oldest_secondary_optime: - abort(511) + if not primary_optime or not oldest_secondary_optime: + abort(511) - # if primary_optime - oldest_secondary_optime > 100: - # abort(512) + # if primary_optime - oldest_secondary_optime > 100: + # abort(512) - return str(stories) + return str(stories) @app.route("/db_check/mongo_analytics") def db_check_mongo_analytics(): From 1fdebf5c769f6437a5ab4b4c5cda6c6df8606dd4 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 19 Oct 2021 11:11:11 -0400 Subject: [PATCH 03/23] Removing haproxy=1 from health checks. Haproxy tests the full mongo db, so it's the default case, no need for exceptional query string like consul. 
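Since haproxy now exercises the same default path as any other client, the check can be verified by hand with a plain GET, mirroring what httpchk sends (host and port are placeholders for a db_monitor instance):

    # Hypothetical manual probe of the endpoint haproxy now polls.
    import requests

    r = requests.get("http://db-mongo:5000/db_check/mongo", timeout=5)
    print(r.status_code, r.text)  # expect 200 plus the story count on a healthy replica set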
--- config/debug_haproxy.conf | 2 +- config/haproxy.conf.template | 2 +- docker/haproxy/haproxy.consul.cfg.j2 | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/config/debug_haproxy.conf b/config/debug_haproxy.conf index ca210f023..ce5ca5067 100644 --- a/config/debug_haproxy.conf +++ b/config/debug_haproxy.conf @@ -124,7 +124,7 @@ backend postgres option httpchk GET /db_check/postgres server postgres-db01 db_pgsql:5000 check inter 2000ms backend mongo - option httpchk GET /db_check/mongo?haproxy=1 + option httpchk GET /db_check/mongo server mongo-db22 db_mongo:5000 check inter 2000ms backend redis option httpchk GET /db_check/redis diff --git a/config/haproxy.conf.template b/config/haproxy.conf.template index 71e6721ff..cb8245f73 100644 --- a/config/haproxy.conf.template +++ b/config/haproxy.conf.template @@ -146,7 +146,7 @@ backend postgres server postgres-db02 db_pgsql:5000 check inter 2000ms backend mongo - option httpchk GET /db_check/mongo?haproxy=1 + option httpchk GET /db_check/mongo server mongo-db20d db20d:5000 check inter 2000ms server mongo-db22 db22:5000 check inter 2000ms server mongo-db23a db23a:5000 check inter 2000ms diff --git a/docker/haproxy/haproxy.consul.cfg.j2 b/docker/haproxy/haproxy.consul.cfg.j2 index 303ae8992..1ee58e647 100644 --- a/docker/haproxy/haproxy.consul.cfg.j2 +++ b/docker/haproxy/haproxy.consul.cfg.j2 @@ -167,7 +167,7 @@ backend postgres server db-postgres db-postgres.node.nyc1.consul:5579 check inter 2000ms resolvers consul resolve-prefer ipv4 resolve-opts allow-dup-ip init-addr none backend mongo - option httpchk GET /db_check/mongo?haproxy=1 + option httpchk GET /db_check/mongo default-server check inter 2000ms resolvers consul resolve-prefer ipv4 resolve-opts allow-dup-ip init-addr none {% for host in groups.mongo %} server {{host}} {{host}}.node.nyc1.consul:5579 From 2a8f8678dc1dc12963d4d28a89d381274878ff7d Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 19 Oct 2021 11:15:47 -0400 Subject: [PATCH 04/23] Splitting not master error and server selection timeout in mongo monitor. --- flask_monitor/db_monitor.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/flask_monitor/db_monitor.py b/flask_monitor/db_monitor.py index e34fed587..5a1eaf05a 100644 --- a/flask_monitor/db_monitor.py +++ b/flask_monitor/db_monitor.py @@ -87,12 +87,14 @@ def db_check_mongo(): try: stories = db.stories.estimated_document_count() - except (pymongo.errors.NotMasterError, pymongo.errors.ServerSelectionTimeoutError): + except pymongo.errors.NotMasterError: abort(504) + except pymongo.errors.ServerSelectionTimeoutError: + abort(505) except pymongo.errors.OperationFailure as e: if 'Authentication failed' in str(e): - abort(505) - abort(506) + abort(506) + abort(507) if not stories: abort(510) From ce34d598ca4877abcae802c0293728110daa077a Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 19 Oct 2021 11:26:22 -0400 Subject: [PATCH 05/23] Directory needs changing from elasticsearch_exporter to elasticsearch-exporter. 
--- ansible/playbooks/setup_elasticsearch.yml | 2 +- ansible/roles/elasticsearch-exporter/tasks/main.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/playbooks/setup_elasticsearch.yml b/ansible/playbooks/setup_elasticsearch.yml index 6bbfe92dd..8635bc72b 100644 --- a/ansible/playbooks/setup_elasticsearch.yml +++ b/ansible/playbooks/setup_elasticsearch.yml @@ -16,5 +16,5 @@ - {role: 'consul-client', tags: 'consul'} - {role: 'elasticsearch', tags: 'elasticsearch'} - {role: 'node-exporter', tags: ['node-exporter', 'metrics']} - - {role: 'elasticsearch-exporter', tags: ['metrics', elasticsearch]} + - {role: 'elasticsearch-exporter', tags: ['metrics', 'elasticsearch']} - {role: 'monitor', tags: 'monitor'} diff --git a/ansible/roles/elasticsearch-exporter/tasks/main.yml b/ansible/roles/elasticsearch-exporter/tasks/main.yml index d563f02c4..0d04b4acc 100644 --- a/ansible/roles/elasticsearch-exporter/tasks/main.yml +++ b/ansible/roles/elasticsearch-exporter/tasks/main.yml @@ -3,7 +3,7 @@ tags: consul become: yes template: - src: /srv/newsblur/ansible/roles/elasticsearch_exporter/templates/consul_service.json + src: /srv/newsblur/ansible/roles/elasticsearch-exporter/templates/consul_service.json dest: /etc/consul.d/elasticsearch_exporter.json notify: - reload consul From 5915ee1a3e7213fa70c9088af9a56fa62ccaed64 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 19 Oct 2021 12:53:11 -0400 Subject: [PATCH 06/23] Restarting grafana on build/deploy. --- ansible/roles/elasticsearch-exporter/tasks/main.yml | 2 +- ansible/roles/grafana/handlers/main.yml | 5 +++++ ansible/roles/grafana/tasks/main.yml | 8 ++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 ansible/roles/grafana/handlers/main.yml diff --git a/ansible/roles/elasticsearch-exporter/tasks/main.yml b/ansible/roles/elasticsearch-exporter/tasks/main.yml index 0d04b4acc..b39dedcb3 100644 --- a/ansible/roles/elasticsearch-exporter/tasks/main.yml +++ b/ansible/roles/elasticsearch-exporter/tasks/main.yml @@ -16,7 +16,7 @@ image: prometheuscommunity/elasticsearch-exporter:latest pull: yes state: started - command: '--es.uri=http://db_elasticsearch:9200' + command: '--es.uri=http://elasticsearch:9200' hostname: "{{ inventory_hostname }}" restart_policy: unless-stopped container_default_behavior: no_defaults diff --git a/ansible/roles/grafana/handlers/main.yml b/ansible/roles/grafana/handlers/main.yml new file mode 100644 index 000000000..11142d666 --- /dev/null +++ b/ansible/roles/grafana/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart grafana + become: yes + command: docker restart grafana + listen: restart grafana diff --git a/ansible/roles/grafana/tasks/main.yml b/ansible/roles/grafana/tasks/main.yml index 298b633cc..d56f91afc 100644 --- a/ansible/roles/grafana/tasks/main.yml +++ b/ansible/roles/grafana/tasks/main.yml @@ -3,6 +3,7 @@ template: src: /srv/newsblur/docker/grafana/grafana.ini.j2 dest: /srv/newsblur/docker/grafana/grafana.ini + notify: restart grafana - name: Register grafana in consul tags: consul @@ -33,3 +34,10 @@ - /srv/newsblur/docker/volumes/grafana_data:/var/lib/grafana - /srv/newsblur/docker/grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml - /srv/newsblur/docker/grafana/dashboards/:/etc/grafana/provisioning/dashboards/ + +- name: Restart grafana + debug: + msg: Restarting grafana + changed_when: yes + notify: + - restart grafana From 95693dae821c6fc10eb71c3ea6829fc84c7780ff Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: 
Tue, 19 Oct 2021 13:00:30 -0400 Subject: [PATCH 07/23] Handling missing .env file in node. --- node/newsblur.coffee | 13 +++++++------ node/newsblur.js | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/node/newsblur.coffee b/node/newsblur.coffee index 62d65ba67..f9bcf2007 100644 --- a/node/newsblur.coffee +++ b/node/newsblur.coffee @@ -8,7 +8,8 @@ if envresult.error # throw envresult.error envresult = require('dotenv').config() if envresult.error - throw envresult.error + log.debug " ---> No .env file found, using defaults" + # throw envresult.error ENV_DEV = process.env.NODE_ENV == 'development' ENV_PROD = process.env.NODE_ENV == 'production' @@ -28,12 +29,12 @@ if ENV_PROD debug: true, tracesSampleRate: 1.0 - app.use(Sentry.Handlers.requestHandler()) + app.use Sentry.Handlers.requestHandler() -original_page(app) -original_text(app) -favicons(app) -unread_counts(server) +original_page app +original_text app +favicons app +unread_counts server if ENV_PROD app.get "/debug", (req, res) -> diff --git a/node/newsblur.js b/node/newsblur.js index 34626956f..3ff55dd99 100644 --- a/node/newsblur.js +++ b/node/newsblur.js @@ -20,11 +20,11 @@ // throw envresult.error envresult = require('dotenv').config(); if (envresult.error) { - log.debug(`There is no .env file. Continuing...`); - //throw envresult.error; + log.debug(" ---> No .env file found, using defaults"); } } + // throw envresult.error ENV_DEV = process.env.NODE_ENV === 'development'; ENV_PROD = process.env.NODE_ENV === 'production'; From 5734225d6f6242ef2b20dcc13de20e12a7221c31 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Wed, 20 Oct 2021 16:46:19 -0400 Subject: [PATCH 08/23] Updating Redis to v6, adding redis servers. --- ansible/roles/redis/tasks/main.yml | 22 +- ansible/roles/redis/templates/redis.conf | 1879 +++++++++++++++++ .../roles/redis/templates/redis_replica.conf | 1 + config/redis-init | 68 - config/redis.conf | 599 ------ config/redis_master.conf | 0 config/redis_rclocal.txt | 8 - config/redis_slave.conf | 1 - docker-compose.yml | 2 +- terraform/digitalocean.tf | 12 +- terraform/variables.tf | 10 + 11 files changed, 1908 insertions(+), 694 deletions(-) create mode 100644 ansible/roles/redis/templates/redis.conf create mode 100644 ansible/roles/redis/templates/redis_replica.conf delete mode 100644 config/redis-init delete mode 100644 config/redis.conf delete mode 100644 config/redis_master.conf delete mode 100644 config/redis_rclocal.txt delete mode 100644 config/redis_slave.conf diff --git a/ansible/roles/redis/tasks/main.yml b/ansible/roles/redis/tasks/main.yml index 1af0c993a..03a1ea5f8 100644 --- a/ansible/roles/redis/tasks/main.yml +++ b/ansible/roles/redis/tasks/main.yml @@ -3,7 +3,7 @@ become: yes docker_container: name: redis - image: redis:6.2.1 + image: redis:6.2.6 state: started hostname: "{{ inventory_hostname }}" ports: @@ -11,18 +11,18 @@ restart_policy: unless-stopped volumes: - /srv/newsblur/docker/volumes/redis:/data - - /srv/newsblur/config/redis.conf:/usr/local/etc/redis_server.conf - - /srv/newsblur/config/redis_docker.conf:/etc/redis_server.conf + - /srv/newsblur/ansible/roles/redis/templates/redis.conf:/usr/local/etc/redis_server.conf + - /srv/newsblur/ansible/roles/redis/templates/redis_replica.conf:/usr/local/etc/redis_replica.conf - /srv/newsblur/docker/volumes/redis.var.lib:/var/lib/redis -- name: Register redis in consul - tags: consul - become: yes - template: - src: consul_service.json - dest: /etc/consul.d/redis.json - notify: - - reload consul +# - name: Register redis in 
consul +# become: yes +# template: +# src: consul_service.json +# dest: /etc/consul.d/redis.json +# notify: +# - reload consul +# tags: consul - name: Add sanity checkers cronjob for disk usage become: yes diff --git a/ansible/roles/redis/templates/redis.conf b/ansible/roles/redis/templates/redis.conf new file mode 100644 index 000000000..e011590a9 --- /dev/null +++ b/ansible/roles/redis/templates/redis.conf @@ -0,0 +1,1879 @@ +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Note that option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. +# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all available network interfaces on the host machine. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only on the +# IPv4 loopback interface address (this means Redis will only be able to +# accept client connections from the same host that it is running on). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT OUT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind 127.0.0.1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. 
+# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode yes + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Force network equipment in the middle to consider the connection to be +# alive. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. +# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. 
+# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# Explicitly specify TLS versions to support. Allowed values are case insensitive +# and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or +# any combination. To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous pings back to your supervisor. +supervised no + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. 
Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY. Basically this means +# that normally a logo is displayed only in interactive sessions. +# +# However it is possible to force the pre-4.0 behavior and always show a +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo yes + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behavior will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# By default compression is enabled as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# Remove RDB files used by replication in instances without persistence +# enabled. 
By default this option is disabled, however there are environments +# where for regulations or other security concerns, RDB files persisted on +# disk by masters in order to feed replicas, or stored on disk by replicas +# in order to load them for the initial synchronization, should be deleted +# ASAP. Note that this option ONLY WORKS in instances that have both AOF +# and RDB persistence disabled, otherwise is completely ignored. +# +# An alternative (and sometimes better) way to obtain the same effect is +# to use diskless replication on both master and replicas instances. However +# in the case of replicas, diskless is not always an option. +rdb-del-sync-files no + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. +# +# replicaof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth +# +# However this is not enough if you are using Redis ACLs (for Redis version +# 6 or greater), and the default user is not capable of running the PSYNC +# command and/or other commands needed for replication. In this case it's +# better to configure a special user to use with replication, and specify the +# masteruser configuration as such: +# +# masteruser +# +# When masteruser is specified, the replica will authenticate against its +# master using the new AUTH form: AUTH . + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all commands except: +# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY. 
+# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# New replicas and reconnecting replicas that are not able to continue the +# replication process just receiving differences, need to do what is called a +# "full synchronization". An RDB file is transmitted from the master to the +# replicas. +# +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child +# producing the RDB file finishes its work. With diskless replication instead +# once the transfer starts, new replicas arriving will be queued and a new +# transfer will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple +# replicas will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the +# server waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# ----------------------------------------------------------------------------- +# WARNING: RDB diskless load is experimental. Since in this setup the replica +# does not immediately store an RDB on disk, it may cause data loss during +# failovers. RDB diskless load + Redis modules not handling I/O reads may also +# cause Redis to abort in case of I/O errors during the initial synchronization +# stage with the master. Use only if your do what you are doing. 
+# ----------------------------------------------------------------------------- +# +# Replica can load the RDB it reads from the replication link directly from the +# socket, or store the RDB to a file and read that file after it was completely +# received from the master. +# +# In many cases the disk is slower than the network, and storing and loading +# the RDB file may increase replication time (and even increase the master's +# Copy on Write memory and salve buffers). +# However, parsing the RDB file directly from the socket may mean that we have +# to flush the contents of the current database before the full rdb was +# received. For this reason we have the following options: +# +# "disabled" - Don't use diskless load (store the rdb file to the disk first) +# "on-empty-db" - Use diskless load only when it is completely safe. +# "swapdb" - Keep a copy of the current db contents in RAM while parsing +# the data directly from the socket. note that this requires +# sufficient memory, if you don't have it, you risk an OOM kill. +repl-diskless-load disabled + +# Replicas send PINGs to server in a predefined interval. It's possible to +# change this interval with the repl_ping_replica_period option. The default +# value is 10 seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. +# +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. +# +# The backlog is only allocated if there is at least one replica connected. +# +# repl-backlog-size 1mb + +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. 
+# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with other replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. +# This is implemented using an invalidation table that remembers, using +# 16 millions of slots, what clients may have certain subsets of keys. 
In turn +# this is used in order to send invalidation messages to clients. Please +# check this page to understand more about the feature: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. +# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. By default it is set to 1M of keys, and once this limit +# is reached, Redis will start to evict keys in the invalidation table +# even if they were not modified, just to reclaim memory: this will in turn +# force the clients to invalidate the cached values. Basically the table +# maximum size is a trade off between the memory you want to spend server +# side to track information about who cached what, and the ability of clients +# to retain cached objects in memory. +# +# If you set the value to 0, it means there are no limits, and Redis will +# retain as many keys as needed in the invalidation table. +# In the "stats" INFO section, you can find information about the number of +# keys in the invalidation table at every given moment. +# +# Note: when key tracking is used in broadcasting mode, no memory is used +# in the server side so this setting is useless. +# +# tracking-table-max-keys 1000000 + +################################## SECURITY ################################### + +# Warning: since Redis is pretty fast, an outside user can try up to +# 1 million passwords per second against a modern box. This means that you +# should use very strong passwords, otherwise they will be very easy to break. +# Note that because the password is really a shared secret between the client +# and the server, and should not be memorized by any human, the password +# can be easily a long string from /dev/urandom or whatever, so by using a +# long and unguessable password no brute force attack will be possible. + +# Redis ACL users are defined in the following format: +# +# user ... acl rules ... +# +# For example: +# +# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 +# +# The special username "default" is used for new connections. If this user +# has the "nopass" rule, then new connections will be immediately authenticated +# as the "default" user without the need of any password provided via the +# AUTH command. Otherwise if the "default" user is not flagged with "nopass" +# the connections will start in not authenticated state, and will require +# AUTH (or the HELLO command AUTH option) in order to be authenticated and +# start to work. +# +# The ACL rules that describe what a user can do are the following: +# +# on Enable the user: it is possible to authenticate as this user. +# off Disable the user: it's no longer possible to authenticate +# with this user, however the already authenticated connections +# will still work. +# + Allow the execution of that command +# - Disallow the execution of that command +# +@ Allow the execution of all the commands in such category +# with valid categories are like @admin, @set, @sortedset, ... +# and so forth, see the full list in the server.c file where +# the Redis command table is described and defined. 
+# The special category @all means all the commands, but currently +# present in the server, and that will be loaded in the future +# via modules. +# +|subcommand Allow a specific subcommand of an otherwise +# disabled command. Note that this form is not +# allowed as negative like -DEBUG|SEGFAULT, but +# only additive starting with "+". +# allcommands Alias for +@all. Note that it implies the ability to execute +# all the future commands loaded via the modules system. +# nocommands Alias for -@all. +# ~ Add a pattern of keys that can be mentioned as part of +# commands. For instance ~* allows all the keys. The pattern +# is a glob-style pattern like the one of KEYS. +# It is possible to specify multiple patterns. +# allkeys Alias for ~* +# resetkeys Flush the list of allowed keys patterns. +# > Add this password to the list of valid password for the user. +# For example >mypass will add "mypass" to the list. +# This directive clears the "nopass" flag (see later). +# < Remove this password from the list of valid passwords. +# nopass All the set passwords of the user are removed, and the user +# is flagged as requiring no password: it means that every +# password will work against this user. If this directive is +# used for the default user, every new connection will be +# immediately authenticated with the default user without +# any explicit AUTH command required. Note that the "resetpass" +# directive will clear this condition. +# resetpass Flush the list of allowed passwords. Moreover removes the +# "nopass" status. After "resetpass" the user has no associated +# passwords and there is no way to authenticate without adding +# some password (or setting it as "nopass" later). +# reset Performs the following actions: resetpass, resetkeys, off, +# -@all. The user returns to the same state it has immediately +# after its creation. +# +# ACL rules can be specified in any order: for instance you can start with +# passwords, then flags, or key patterns. However note that the additive +# and subtractive rules will CHANGE MEANING depending on the ordering. +# For instance see the following example: +# +# user alice on +@all -DEBUG ~* >somepassword +# +# This will allow "alice" to use all the commands with the exception of the +# DEBUG command, since +@all added all the commands to the set of the commands +# alice can use, and later DEBUG was removed. However if we invert the order +# of two ACL rules the result will be different: +# +# user alice on -DEBUG +@all ~* >somepassword +# +# Now DEBUG was removed when alice had yet no commands in the set of allowed +# commands, later all the commands are added, so the user will be able to +# execute everything. +# +# Basically ACL rules are processed left-to-right. +# +# For more information about ACL configuration please refer to +# the Redis web site at https://redis.io/topics/acl + +# ACL LOG +# +# The ACL Log tracks failed commands and authentication events associated +# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. +acllog-max-len 128 + +# Using an external ACL file +# +# Instead of configuring users here in this file, it is possible to use +# a stand-alone file just listing users. The two methods cannot be mixed: +# if you configure users here and at the same time you activate the external +# ACL file, the server will refuse to start. 
+# +# The format of the external ACL user file is exactly the same as the +# format that is used inside redis.conf to describe users. +# +# aclfile /etc/redis/users.acl + +# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility +# layer on top of the new ACL system. The option effect will be just setting +# the password for the default user. Clients will still authenticate using +# AUTH as usually, or more explicitly with AUTH default +# if they follow the new protocol: both will work. +# +# requirepass foobared + +# Command renaming (DEPRECATED). +# +# ------------------------------------------------------------------------ +# WARNING: avoid using this option if possible. Instead use ACLs to remove +# commands from the default user, and put them only in some admin user you +# create for administrative purposes. +# ------------------------------------------------------------------------ +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). 
+# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select one from the following behaviors: +# +# volatile-lru -> Evict using approximated LRU, only keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key having an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# Both LRU, LFU and volatile-ttl are implemented using approximated +# randomized algorithms. +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica +# to have a different memory setting, and you are sure all the writes performed +# to the replica are idempotent, then you may change this default (but be sure +# to understand what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory +# and so forth). 
So make sure you monitor your replicas and make sure they +# have enough memory to never hit a real out-of-memory condition before the +# master hits the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +# Redis reclaims expired keys in two ways: upon access when those keys are +# found to be expired, and also in background, in what is called the +# "active expire key". The key space is slowly and interactively scanned +# looking for expired keys to reclaim, so that it is possible to free memory +# of keys that are expired and will never be accessed again in a short time. +# +# The default effort of the expire cycle will try to avoid having more than +# ten percent of expired keys still in memory, and will try to avoid consuming +# more than 25% of total memory and to add latency to the system. However +# it is possible to increase the expire "effort" that is normally set to +# "1", to a greater value, up to the value "10". At its maximum value the +# system will use more CPU, longer cycles (and technically may introduce +# more latency), and will tolerate less already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. 
+# 4) During replication, when a replica performs a full resynchronization with
+# its master, the content of the whole database is removed in order to
+# load the RDB file just transferred.
+#
+# In all the above cases the default is to delete objects in a blocking way,
+# as if DEL was called. However you can configure each case specifically
+# in order to instead release memory in a non-blocking way, as if UNLINK
+# was called, using the following configuration directives.
+
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+replica-lazy-flush no
+
+# It is also possible, for cases where it is not easy to replace the user
+# code's DEL calls with UNLINK calls, to modify the default behavior of the DEL
+# command to act exactly like UNLINK, using the following configuration
+# directive:
+
+lazyfree-lazy-user-del no
+
+################################ THREADED I/O #################################
+
+# Redis is mostly single threaded, however there are certain threaded
+# operations such as UNLINK, slow I/O accesses and other things that are
+# performed on side threads.
+#
+# Now it is also possible to handle Redis clients socket reads and writes
+# in different I/O threads. Since writing in particular is slow, normally
+# Redis users use pipelining in order to speed up Redis performance per
+# core, and spawn multiple instances in order to scale more. Using I/O
+# threads it is possible to easily speed up Redis twofold without resorting
+# to pipelining nor sharding of the instance.
+#
+# By default threading is disabled; we suggest enabling it only on machines
+# that have at least 4 or more cores, leaving at least one spare core.
+# Using more than 8 threads is unlikely to help much. We also recommend using
+# threaded I/O only if you actually have performance problems, with Redis
+# instances being able to use a quite big percentage of CPU time, otherwise
+# there is no point in using this feature.
+#
+# So for instance if you have a four core box, try to use 2 or 3 I/O
+# threads; if you have an 8 core box, try to use 6 threads. In order to
+# enable I/O threads use the following configuration directive:
+#
+# io-threads 4
+#
+# Setting io-threads to 1 will just use the main thread as usual.
+# When I/O threads are enabled, we only use threads for writes, that is
+# to thread the write(2) syscall and transfer the client buffers to the
+# socket. However it is also possible to enable threading of reads and
+# protocol parsing using the following configuration directive, by setting
+# it to yes:
+#
+# io-threads-do-reads no
+#
+# Usually threading reads doesn't help much.
+#
+# NOTE 1: This configuration directive cannot be changed at runtime via
+# CONFIG SET. Also, this feature currently does not work when SSL is
+# enabled.
+#
+# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
+# sure you also run the benchmark itself in threaded mode, using the
+# --threads option to match the number of Redis threads, otherwise you'll not
+# be able to notice the improvements.
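+
+# For example (an illustrative run, not part of the stock template; the
+# counts are arbitrary), with "io-threads 4" configured, a threaded
+# benchmark per NOTE 2 above might look like:
+#
+#   redis-benchmark --threads 4 -t set,get -n 100000
+#
+# A single-threaded redis-benchmark run would understate the speedup.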
+
+############################ KERNEL OOM CONTROL ##############################
+
+# On Linux, it is possible to hint the kernel OOM killer on what processes
+# should be killed first when out of memory.
+#
+# Enabling this feature makes Redis actively control the oom_score_adj value
+# for all its processes, depending on their role. The default scores will
+# attempt to have background child processes killed before all others, and
+# replicas killed before masters.
+#
+# Redis supports the following options:
+#
+# no: Don't make changes to oom-score-adj (default).
+# yes: Alias to "relative", see below.
+# absolute: Values in oom-score-adj-values are written as is to the kernel.
+# relative: Values are used relative to the initial value of oom_score_adj when
+# the server starts and are then clamped to a range of -1000 to 1000.
+# Because typically the initial value is 0, they will often match the
+# absolute values.
+oom-score-adj no
+
+# When oom-score-adj is used, this directive controls the specific values used
+# for master, replica and background child processes. Values range -2000 to
+# 2000 (higher means more likely to be killed).
+#
+# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
+# can freely increase their value, but not decrease it below its initial
+# settings. This means that setting oom-score-adj to "relative" and setting the
+# oom-score-adj-values to positive values will always succeed.
+oom-score-adj-values 0 200 800
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result in a few minutes of writes being lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OS will really flush
+# data on disk, some other OS will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
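+#
+# As a hedged illustration (not from the stock template), both AOF and the
+# fsync policy can be toggled at runtime, which makes it easy to try
+# "everysec" before committing it to this file:
+#
+#   redis-cli config set appendonly yes
+#   redis-cli config set appendfsync everysec
+#   redis-cli config get appendfsync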
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size grows
+# beyond the base size by more than the specified percentage, the rewrite is
+# triggered. Also you need to specify a minimal size for the AOF file to be
+# rewritten; this is useful to avoid rewriting the AOF file even if the
+# percentage increase is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle,
+# the server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes
+
+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on the rewritten AOF file is composed of two different stanzas:
+#
+# [RDB file][AOF tail]
+#
+# When loading, Redis recognizes that the AOF file starts with the "REDIS"
+# string and loads the prefixed RDB file, then continues loading the AOF
+# tail.
+aof-use-rdb-preamble yes
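+
+# As an illustrative aside (not part of the stock template), the rewrite
+# policy above can be exercised manually and its progress observed, assuming
+# AOF is enabled on the instance:
+#
+#   redis-cli bgrewriteaof
+#   redis-cli info persistence | grep aof_rewrite_in_progress
+#
+# This is a low-risk way to sanity check the AOF settings on a staging host.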
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet call any write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node, enable cluster support by uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the amount of milliseconds a node must be unreachable
+# for it to be considered in a failure state.
+# Most other internal time limits are a multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A replica of a failing master will avoid starting a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to fail over, they exchange messages
+# in order to try to give an advantage to the replica with the best
+# replication offset (more data from the master processed).
+# Replicas will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the replica will not try to fail over
+# at all.
+#
+# The point "2" can be tuned by the user.
+# Specifically a replica will not perform the failover if, since the last
+# interaction with the master, the time elapsed is greater than:
+#
+# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to fail over if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large cluster-replica-validity-factor may allow replicas with too old data to fail over
+# a master, while a too small value may prevent the cluster from being able to
+# elect a replica at all.
+#
+# For maximum availability, it is possible to set the cluster-replica-validity-factor
+# to a value of 0, which means that replicas will always try to fail over the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10
+
+# Cluster replicas are able to migrate to orphaned masters, that are masters
+# that are left without working replicas. This improves the cluster's ability
+# to resist failures, as otherwise an orphaned master can't be failed over
+# in case of failure if it has no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
+# master in your cluster.
+#
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least a hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# are no longer covered) all the cluster becomes, eventually, unavailable.
+# It automatically returns available as soon as all the slots are covered again.
+#
+# However sometimes you want the subset of the cluster which is working
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents replicas from trying to fail over their
+# master during master failures. However the master can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted if not
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while the
+# cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases.
+# The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A
+# master outage in a 1 or 2 shard configuration causes a read/write outage to the
+# entire cluster without this option set; with it set there is only a write outage.
+# Without a quorum of masters, slot ownership will not change automatically.
+#
+# cluster-allow-reads-when-down no
+
+# In order to set up your cluster make sure to read the documentation
+# available at the http://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster nodes' address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following options are used for this purpose, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-bus-port
+#
+# Each option instructs the node about its address, client port, and cluster message
+# bus port. The information is then published in the header of the bus packets
+# so that other nodes will be able to correctly map the address of the node
+# publishing the information.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# clients port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-port 6379
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
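+#
+# As a quick illustration (not part of the stock template): with the
+# threshold above set to 10000 microseconds, slow commands can be inspected
+# and the log cleared via redis-cli:
+#
+#   redis-cli slowlog get 10
+#   redis-cli slowlog len
+#   redis-cli slowlog reset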
+
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user, who can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact that, while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# t Stream commands
+# m Key-miss events (Note: It is not included in the 'A' class)
+# A Alias for g$lshzxet, so that the "AKE" string means all the events
+# (Except key-miss events which are excluded from 'A' due to their
+# unique nature).
+#
+# The "notify-keyspace-events" directive takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired, use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
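+
+# Hedged example (not from the stock template; key and value are
+# hypothetical): to watch expired key events on database 0 from a second
+# terminal, one could run:
+#
+#   redis-cli config set notify-keyspace-events Ex
+#   redis-cli psubscribe '__keyevent@0__:expired'
+#
+# and then set a key with a short TTL ("redis-cli set foo bar ex 1") to see
+# the notification arrive.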
+
+############################### GOPHER SERVER #################################
+
+# Redis contains an implementation of the Gopher protocol, as specified in
+# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and the implementation both server and client side is so simple
+# that the Redis server has just 100 lines of code in order to implement this
+# support.
+#
+# What do you do with Gopher nowadays? Well Gopher never *really* died, and
+# lately there is a movement to resurrect Gopher's more hierarchical content,
+# composed of just plain text documents. Some want a simpler
+# internet, others believe that the mainstream internet became too
+# controlled, and it's cool to create an alternative space for people that
+# want a bit of fresh air.
+#
+# Anyway for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS? ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kinds of inline requests that were anyway illegal: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with such a slash). Normal RESP2/RESP3 requests are completely out of the
+# path of the Gopher protocol implementation and are served as usual as well.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
+#
+# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
+# speak), you likely need a script like the following:
+#
+# https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet in a publicly accessible address
+# to serve Gopher pages MAKE SURE TO SET A PASSWORD on the instance.
+# Once a password is set:
+#
+# 1. The Gopher server (when enabled, not by default) will still serve
+# content via Gopher.
+# 2. However other commands cannot be called before the client
+# authenticates.
+#
+# So use the 'requirepass' option to protect your instance.
+#
+# Note that Gopher is not currently supported when 'io-threads-do-reads'
+# is enabled.
+#
+# To enable Gopher support, uncomment the following line and set the option
+# from no (the default) to yes.
+#
+# gopher-enabled no
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb <-- not recommended for normal workloads
+# -4: max size: 32 Kb <-- not recommended
+# -3: max size: 16 Kb <-- probably not recommended
+# -2: max size: 8 Kb <-- good
+# -1: max size: 4 Kb <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations.
+# Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+# going from either the head or tail"
+# So: [head]->node->node->...->node->[tail]
+# [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+# 2 here means: don't compress head or head->next or tail->prev or tail,
+# but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit in the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 byte header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down PFADD too much, as
+# PFADD is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
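+
+# Illustrative sketch (not part of the stock template; key names are
+# hypothetical): the special encodings described above can be observed with
+# OBJECT ENCODING:
+#
+#   redis-cli rpush mylist a b c
+#   redis-cli object encoding mylist    (-> "quicklist")
+#   redis-cli sadd myset 1 2 3
+#   redis-cli object encoding myset     (-> "intset")
+#
+# Once a structure crosses the limits above, Redis silently converts it to
+# the general-purpose encoding.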
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously exceeds
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbounded memory usage in
+# the query buffer. However you can configure it here if you have very special
+# needs, such as huge multi/exec requests or the like.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here, but it must be 1mb or greater.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful in order, for instance, to
+# avoid having too many clients processed for each background task invocation,
+# in order to avoid latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which will temporarily rise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When redis saves an RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
+# idea to start with the default settings and only change them after investigating
+# how to improve the performance and how the keys' LFU changes over time, which
+# is possible to inspect via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key, its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
+# +--------+------------+------------+------------+------------+------------+
+# | 0      | 104        | 255        | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 1      | 18         | 49         | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 10     | 10         | 18         | 142        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 100    | 8          | 11         | 49         | 143        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+# redis-benchmark -n 1000000 incr foo
+# redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented if it has a value
+# <= 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
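+
+# Hedged illustration (not part of the stock template; "foo" is just an
+# example key): to experiment with LFU, switch the eviction policy and
+# inspect a key's frequency counter as in the NOTE above:
+#
+#   redis-cli config set maxmemory-policy allkeys-lfu
+#   redis-cli set foo bar
+#   redis-cli object freq foo
+#
+# OBJECT FREQ returns an error unless an LFU maxmemory-policy is active.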
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing it to reclaim memory.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature
+# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the keys,
+# will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+# to use the copy of Jemalloc we ship with the source code of Redis.
+# This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+# issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+# needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
+
+# Enable active defragmentation
+# activedefrag no
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
+
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performance of the server.
+# This is useful both in order to pin different Redis threads on different
+# CPUs, and in order to make sure that multiple Redis instances running
+# on the same host will be pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to do this via Redis configuration directly, both in Linux and FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process, and
+# the bgsave child process. 
The syntax to specify the cpu list is the same as +# the taskset command: +# +# Set redis server/io threads to cpu affinity 0,2,4,6: +# server_cpulist 0-7:2 +# +# Set bio threads to cpu affinity 1,3: +# bio_cpulist 1,3 +# +# Set aof rewrite child process to cpu affinity 8,9,10,11: +# aof_rewrite_cpulist 8-11 +# +# Set bgsave child process to cpu affinity 1,10,11 +# bgsave_cpulist 1,10-11 + +# In some cases redis will emit warnings and even refuse to start if it detects +# that the system is in bad state, it is possible to suppress these warnings +# by setting the following config which takes a space delimited list of warnings +# to suppress +# +# ignore-warnings ARM64-COW-BUG + +include /usr/local/etc/redis_server.conf diff --git a/ansible/roles/redis/templates/redis_replica.conf b/ansible/roles/redis/templates/redis_replica.conf new file mode 100644 index 000000000..029abb91f --- /dev/null +++ b/ansible/roles/redis/templates/redis_replica.conf @@ -0,0 +1 @@ +replicaof redis.service.nyc1.consul diff --git a/config/redis-init b/config/redis-init deleted file mode 100644 index a075105cf..000000000 --- a/config/redis-init +++ /dev/null @@ -1,68 +0,0 @@ -### BEGIN INIT INFO -# Provides: redis-server -# Required-Start: $syslog $remote_fs -# Required-Stop: $syslog $remote_fs -# Should-Start: $local_fs -# Should-Stop: $local_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: redis-server - Persistent key-value db -# Description: redis-server - Persistent key-value db -### END INIT INFO - -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin -DAEMON=/usr/local/bin/redis-server -DAEMON_ARGS=/etc/redis.conf -NAME=redis-server -DESC=redis-server -PIDFILE=/var/run/redis.pid - -test -x $DAEMON || exit 0 - -set -e - -case "$1" in - start) - echo -n "Starting $DESC: " - touch $PIDFILE - if start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- $DAEMON_ARGS - then - echo "$NAME." - else - echo "failed" - fi - ;; - stop) - echo -n "Stopping $DESC: " - if start-stop-daemon --stop --retry 10 --quiet --oknodo --pidfile $PIDFILE --exec $DAEMON - then - echo "$NAME." - else - echo "failed" - fi - rm -f $PIDFILE - ;; - - restart|force-reload) - ${0} stop - ${0} start - ;; - - status) - echo -n "$DESC is " - if start-stop-daemon --stop --quiet --signal 0 --name ${NAME} --pidfile ${PIDFILE} - then - echo "running" - else - echo "not running" - exit 1 - fi - ;; - - *) - echo "Usage: /etc/init.d/$NAME {start|stop|restart|force-reload}" >&2 - exit 1 - ;; -esac - -exit 0 \ No newline at end of file diff --git a/config/redis.conf b/config/redis.conf deleted file mode 100644 index 34156f4ab..000000000 --- a/config/redis.conf +++ /dev/null @@ -1,599 +0,0 @@ -# Redis configuration file example - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize no - -# When running daemonized, Redis writes a pid file in /var/run/redis.pid by -# default. You can specify a custom pid file location here. -#pidfile /var/run/redis.pid - -# Accept connections on the specified port, default is 6379. 
-# If port 0 is specified Redis will not listen on a TCP socket. -port 6379 - -# If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for incoming connections. -# -# bind 127.0.0.1 - -# Specify the path for the unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/redis.sock -# unixsocketperm 755 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 60 seconds. -tcp-keepalive 0 - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel notice - -# Specify the log file name. Also 'stdout' can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -# logfile stdout - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################# -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -save 900 1 -save 300 10 -save 60 10000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in an hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# distater will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. 
-# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usually even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error no - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir /var/lib/redis - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. Note that the configuration is local to the slave -# so for example it is possible to configure the slave to save the DB with a -# different interval, or to listen to another port, and so on. -# -# slaveof - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth -protected-mode no - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -slave-serve-stale-data yes - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only no - -# Slaves send PINGs to server in a predefined interval. 
It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# The following option sets a timeout for both Bulk transfer I/O timeout and -# master data or ping response timeout. The default value is 60 seconds. -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -repl-timeout 600 - -# Disable TCP_NODELAY on the slave socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the slave side will -# be reduced but more bandwidth will be used for replication. -# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay no - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one wtih priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -# requirepass foobared - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. 
By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -maxclients 100000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# accordingly to the eviction policy selected (see maxmemmory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU cache, or to set -# an hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). -# -# maxmemory - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: -# -# volatile-lru -> remove the key with an expire set using an LRU algorithm -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# volatile-random -> remove a random key with an expire set -# allkeys-random -> remove a random key, any key -# volatile-ttl -> remove the key with the nearest expire time (minor TTL) -# noeviction -> don't expire at all, just return an error on write operations -# -# Note: with any of the above policies, Redis will return an error on write -# operations, when there are not suitable keys for eviction. -# -# At the date of writing this commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -# maxmemory-policy volatile-lru - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default Redis will check three keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -# maxmemory-samples 3 - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). 
-# -# The Append Only File is an alternative persistence mode that provides -# much better durability. For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly no - -# The name of the append only file (default: "appendonly.aof") -# appendfilename appendonly.aof - -# The fsync() call tells the Operating System to actually write data on disk -# instead to wait for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log . Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. 
Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 1000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 1000000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 1024 - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-ziplist-entries 512 -hash-max-ziplist-value 64 - -# Similarly to hashes, small lists are also encoded in a special way in order -# to save a lot of space. The special representation is only used when -# you are under the following limits: -list-max-ziplist-entries 512 -list-max-ziplist-value 64 - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happens to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries 512 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. 
This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries 128 -zset-max-ziplist-value 64 - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into an hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# active rehashing the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply form time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients -# slave -> slave clients and MONITOR clients -# pubsub -> clients subcribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. -# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave 1024mb 512mb 360 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeot, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are perforemd with the same frequency, but Redis checks for -# tasks to perform accordingly to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. 
-# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync yes - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis server but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# include /path/to/local.conf -# include /path/to/other.conf -#include /etc/redis/redis_server.conf diff --git a/config/redis_master.conf b/config/redis_master.conf deleted file mode 100644 index e69de29bb..000000000 diff --git a/config/redis_rclocal.txt b/config/redis_rclocal.txt deleted file mode 100644 index 5a1484f2c..000000000 --- a/config/redis_rclocal.txt +++ /dev/null @@ -1,8 +0,0 @@ -if test -f /sys/kernel/mm/transparent_hugepage/enabled; then - echo never > /sys/kernel/mm/transparent_hugepage/enabled -fi -if test -f /sys/kernel/mm/transparent_hugepage/defrag; then - echo never > /sys/kernel/mm/transparent_hugepage/defrag -fi - -exit 0; \ No newline at end of file diff --git a/config/redis_slave.conf b/config/redis_slave.conf deleted file mode 100644 index d6e21a287..000000000 --- a/config/redis_slave.conf +++ /dev/null @@ -1 +0,0 @@ -slaveof db_redis 6379 \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index 80911f77d..9af46417e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -93,7 +93,7 @@ services: - ./docker/volumes/postgres:/var/lib/postgresql/data db_redis: - image: redis:3.2.6 + image: redis:latest ports: - 6579:6579 container_name: db_redis diff --git a/terraform/digitalocean.tf b/terraform/digitalocean.tf index bc0290269..fe43a8d8a 100644 --- a/terraform/digitalocean.tf +++ b/terraform/digitalocean.tf @@ -304,7 +304,7 @@ resource "digitalocean_droplet" "db-redis-user" { image = var.droplet_os name = "db-redis-user" region = var.droplet_region - size = var.droplet_size + size = var.droplet_size_40 ssh_keys = [digitalocean_ssh_key.default.fingerprint] provisioner "local-exec" { command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120" @@ -321,7 +321,7 @@ resource "digitalocean_droplet" "db-redis-sessions" { image = var.droplet_os name = "db-redis-sessions" region = var.droplet_region - size = var.droplet_size + size = var.droplet_size_20 ssh_keys = [digitalocean_ssh_key.default.fingerprint] provisioner "local-exec" { command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120" @@ -338,7 +338,7 @@ resource "digitalocean_droplet" "db-redis-story" { image = var.droplet_os name = "db-redis-story" region = var.droplet_region - size = var.droplet_size + size = var.redis_story_droplet_size ssh_keys = [digitalocean_ssh_key.default.fingerprint] provisioner "local-exec" { command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120" @@ -519,7 +519,7 @@ resource "digitalocean_droplet" "task-celery" { image = var.droplet_os name = format("task-celery%02v", count.index+1) region = var.droplet_region - size = var.droplet_size_10 + size = var.droplet_size ssh_keys = 
[digitalocean_ssh_key.default.fingerprint] provisioner "local-exec" { # command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120" @@ -534,11 +534,11 @@ resource "digitalocean_droplet" "task-celery" { } resource "digitalocean_droplet" "task-work" { - count = 2 + count = 3 image = var.droplet_os name = "task-work${count.index+1}" region = var.droplet_region - size = var.droplet_size_10 + size = var.droplet_size ssh_keys = [digitalocean_ssh_key.default.fingerprint] provisioner "local-exec" { command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120" diff --git a/terraform/variables.tf b/terraform/variables.tf index 7771aa619..95bb5b961 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -24,6 +24,11 @@ variable "droplet_size_120" { default = "g-8vcpu-32gb" } +variable "droplet_size_40" { + type = string + default = "s-4vcpu-8gb" +} + variable "droplet_os" { type = string default = "ubuntu-20-04-x64" @@ -59,3 +64,8 @@ variable "elasticsearch_droplet_size" { type = string default = "m3-2vcpu-16gb" } + +variable "redis_story_droplet_size" { + type = string + default = "m-2vcpu-16gb" +} From e0af49022e21fa0bcb7a382d3f541078862d81f8 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Wed, 20 Oct 2021 17:12:24 -0400 Subject: [PATCH 09/23] Adjusting redis conf dirs. --- ansible/roles/redis/tasks/main.yml | 13 +++++++++++-- ansible/roles/redis/templates/redis.conf | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/ansible/roles/redis/tasks/main.yml b/ansible/roles/redis/tasks/main.yml index 03a1ea5f8..b8ce61741 100644 --- a/ansible/roles/redis/tasks/main.yml +++ b/ansible/roles/redis/tasks/main.yml @@ -5,14 +5,23 @@ name: redis image: redis:6.2.6 state: started + command: /usr/local/etc/redis/redis_server.conf + container_default_behavior: no_defaults hostname: "{{ inventory_hostname }}" + networks_cli_compatible: yes + # network_mode: host + network_mode: default + networks: + - name: newsblurnet + aliases: + - redis ports: - 6379:6379 restart_policy: unless-stopped volumes: - /srv/newsblur/docker/volumes/redis:/data - - /srv/newsblur/ansible/roles/redis/templates/redis.conf:/usr/local/etc/redis_server.conf - - /srv/newsblur/ansible/roles/redis/templates/redis_replica.conf:/usr/local/etc/redis_replica.conf + - /srv/newsblur/ansible/roles/redis/templates/redis.conf:/usr/local/etc/redis/redis_server.conf + - /srv/newsblur/ansible/roles/redis/templates/redis_replica.conf:/usr/local/etc/redis/redis_replica.conf - /srv/newsblur/docker/volumes/redis.var.lib:/var/lib/redis # - name: Register redis in consul diff --git a/ansible/roles/redis/templates/redis.conf b/ansible/roles/redis/templates/redis.conf index e011590a9..3f447291e 100644 --- a/ansible/roles/redis/templates/redis.conf +++ b/ansible/roles/redis/templates/redis.conf @@ -1876,4 +1876,4 @@ jemalloc-bg-thread yes # # ignore-warnings ARM64-COW-BUG -include /usr/local/etc/redis_server.conf +include /usr/local/etc/redis/redis_replica.conf From 24d6868108fc4e55c6a907b49ea52757964c8f60 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Wed, 20 Oct 2021 17:18:28 -0400 Subject: [PATCH 10/23] Redis port --- ansible/roles/redis/templates/redis_replica.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/redis/templates/redis_replica.conf b/ansible/roles/redis/templates/redis_replica.conf index 029abb91f..42530a3ba 100644 --- a/ansible/roles/redis/templates/redis_replica.conf +++ b/ansible/roles/redis/templates/redis_replica.conf @@ -1 +1 @@ -replicaof 
redis.service.nyc1.consul +replicaof redis.service.nyc1.consul 6379 From db5cb4bd9442e758721ffd40fc9642fea5f035c2 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Wed, 20 Oct 2021 17:27:20 -0400 Subject: [PATCH 11/23] Moving redis conf to template to distinguish servers. --- .gitignore | 1 + ansible/roles/redis/tasks/main.yml | 11 +++++++++-- ansible/roles/redis/templates/redis_replica.conf | 1 - .../roles/redis/templates => docker/redis}/redis.conf | 0 docker/redis/redis_replica.conf.j2 | 1 + 5 files changed, 11 insertions(+), 3 deletions(-) delete mode 100644 ansible/roles/redis/templates/redis_replica.conf rename {ansible/roles/redis/templates => docker/redis}/redis.conf (100%) create mode 100644 docker/redis/redis_replica.conf.j2 diff --git a/.gitignore b/.gitignore index ddc5d975d..6c0fc1d9e 100644 --- a/.gitignore +++ b/.gitignore @@ -51,6 +51,7 @@ docker/haproxy/haproxy.consul.cfg # docker/haproxy/haproxy.staging.cfg # Staging doesn't use jinja templates, so no need to ignore docker/nginx/nginx.consul.conf docker/prometheus/prometheus.yml +docker/redis/redis_replica.conf # ---------------------- # Android diff --git a/ansible/roles/redis/tasks/main.yml b/ansible/roles/redis/tasks/main.yml index b8ce61741..0ac9e33d2 100644 --- a/ansible/roles/redis/tasks/main.yml +++ b/ansible/roles/redis/tasks/main.yml @@ -1,4 +1,11 @@ --- +- name: Template redis_replica.conf file + template: + src: /srv/newsblur/docker/redis_replica.conf.j2 + dest: /srv/newsblur/docker/redis/redis_replica.conf + notify: restart redis + register: updated_config + - name: Start redis docker containers become: yes docker_container: @@ -20,8 +27,8 @@ restart_policy: unless-stopped volumes: - /srv/newsblur/docker/volumes/redis:/data - - /srv/newsblur/ansible/roles/redis/templates/redis.conf:/usr/local/etc/redis/redis_server.conf - - /srv/newsblur/ansible/roles/redis/templates/redis_replica.conf:/usr/local/etc/redis/redis_replica.conf + - /srv/newsblur/docker/redis/redis.conf:/usr/local/etc/redis/redis_server.conf + - /srv/newsblur/docker/redis/redis_replica.conf:/usr/local/etc/redis/redis_replica.conf - /srv/newsblur/docker/volumes/redis.var.lib:/var/lib/redis # - name: Register redis in consul diff --git a/ansible/roles/redis/templates/redis_replica.conf b/ansible/roles/redis/templates/redis_replica.conf deleted file mode 100644 index 42530a3ba..000000000 --- a/ansible/roles/redis/templates/redis_replica.conf +++ /dev/null @@ -1 +0,0 @@ -replicaof redis.service.nyc1.consul 6379 diff --git a/ansible/roles/redis/templates/redis.conf b/docker/redis/redis.conf similarity index 100% rename from ansible/roles/redis/templates/redis.conf rename to docker/redis/redis.conf diff --git a/docker/redis/redis_replica.conf.j2 b/docker/redis/redis_replica.conf.j2 new file mode 100644 index 000000000..c72519668 --- /dev/null +++ b/docker/redis/redis_replica.conf.j2 @@ -0,0 +1 @@ +replicaof {{ inventory_hostname|regex_replace('\d+', '') }}.service.nyc1.consul 6379 From 1dbc1007d0062361fc470239c31d13b2a2ca1695 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Wed, 22 Sep 2021 16:05:49 -0400 Subject: [PATCH 12/23] Adding mute user feature to hide public comments from users you don't want to see. 
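Muting is recorded on both profiles so that either side can be checked
without loading the other document. The bookkeeping the diff below adds
behaves roughly like this (a simplified sketch: plain Python sets stand
in for the MongoEngine list fields, and ProfileSketch is a hypothetical
stand-in for MSocialProfile, not code from this repo):

    class ProfileSketch:
        def __init__(self, user_id):
            self.user_id = user_id
            self.muting_user_ids = set()    # users this profile has muted
            self.muted_by_user_ids = set()  # users who have muted this profile

        def mute_user(self, target):
            # Record the mute on both sides so either profile can answer
            # "is this user muted?" on its own.
            self.muting_user_ids.add(target.user_id)
            target.muted_by_user_ids.add(self.user_id)

        def unmute_user(self, target):
            # Undo both sides of the relationship.
            self.muting_user_ids.discard(target.user_id)
            target.muted_by_user_ids.discard(self.user_id)

Public comments are then dropped for any reader who appears in the
commenter's muted_by_user_ids, mirroring the existing private-profile
filter.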
--- apps/social/models.py | 51 ++++++++++++++++--- apps/social/urls.py | 2 + apps/social/views.py | 36 +++++++++++++ media/css/reader.css | 1 + media/js/newsblur/common/assetmodel.js | 14 +++++ media/js/newsblur/views/profile_badge_view.js | 41 +++++++++++++-- templates/static/api.yml | 20 ++++++++ 7 files changed, 153 insertions(+), 12 deletions(-) diff --git a/apps/social/models.py b/apps/social/models.py index 0d0d2a041..eb4bb29ad 100644 --- a/apps/social/models.py +++ b/apps/social/models.py @@ -131,6 +131,8 @@ class MSocialProfile(mongo.Document): follower_user_ids = mongo.ListField(mongo.IntField()) unfollowed_user_ids = mongo.ListField(mongo.IntField()) requested_follow_user_ids = mongo.ListField(mongo.IntField()) + muting_user_ids = mongo.ListField(mongo.IntField()) + muted_by_user_ids = mongo.ListField(mongo.IntField()) popular_publishers = mongo.StringField() stories_last_month = mongo.IntField(default=0) average_stories_per_month = mongo.IntField(default=0) @@ -145,7 +147,16 @@ class MSocialProfile(mongo.Document): meta = { 'collection': 'social_profile', - 'indexes': ['user_id', 'username', 'following_user_ids', 'follower_user_ids', 'unfollowed_user_ids', 'requested_follow_user_ids'], + 'indexes': [ + 'user_id', + 'username', + 'following_user_ids', + 'follower_user_ids', + 'unfollowed_user_ids', + 'requested_follow_user_ids', + 'muting_user_ids', + 'muted_by_user_ids', + ], 'allow_inheritance': False, } @@ -423,6 +434,7 @@ class MSocialProfile(mongo.Document): if include_following_user != self.user_id: params['followed_by_you'] = bool(self.is_followed_by_user(include_following_user)) params['following_you'] = self.is_following_user(include_following_user) + params['muted'] = include_following_user in self.muted_by_user_ids return params @@ -688,7 +700,27 @@ class MSocialProfile(mongo.Document): email_type='follow_request') logging.user(user, "~BB~FM~SBSending email for follow request: %s" % follower_profile.user.username) - + + def mute_user(self, muting_user_id): + if muting_user_id not in self.muting_user_ids: + self.muting_user_ids.append(muting_user_id) + self.save() + + muting_user_profile = MSocialProfile.get_user(muting_user_id) + if self.user_id not in muting_user_profile.muted_by_user_ids: + muting_user_profile.muted_by_user_ids.append(self.user_id) + muting_user_profile.save() + + def unmute_user(self, muting_user_id): + if muting_user_id in self.muting_user_ids: + self.muting_user_ids.remove(muting_user_id) + self.save() + + muting_user_profile = MSocialProfile.get_user(muting_user_id) + if self.user_id in muting_user_profile.muted_by_user_ids: + muting_user_profile.muted_by_user_ids.remove(self.user_id) + muting_user_profile.save() + def save_feed_story_history_statistics(self): """ Fills in missing months between earlier occurances and now. 
@@ -2038,17 +2070,20 @@ class MSharedStory(mongo.DynamicDocument): profile_user_ids = profile_user_ids.union(comments['liking_users']) profiles = MSocialProfile.objects.filter(user_id__in=list(profile_user_ids)) - profiles = [profile.canonical(compact=True) for profile in profiles] - # Toss public comments by private profiles + # Toss public comments by private profiles and muted users profiles_dict = dict((profile['user_id'], profile) for profile in profiles) for story in stories: commented_by_public = story.get('commented_by_public') or [c['user_id'] for c in story['public_comments']] - for user_id in commented_by_public: - if profiles_dict[user_id]['private']: - story['public_comments'] = [c for c in story['public_comments'] if c['user_id'] != user_id] + for comment_user_id in commented_by_public: + private = profiles_dict[comment_user_id].private + muted = user_id in profiles_dict[comment_user_id].muted_by_user_ids + if private or muted: + story['public_comments'] = [c for c in story['public_comments'] if c['user_id'] != comment_user_id] story['comment_count_public'] -= 1 - + + profiles = [profile.canonical(compact=True) for profile in profiles] + return stories, profiles @staticmethod diff --git a/apps/social/urls.py b/apps/social/urls.py index 1db6b2262..2b386b217 100644 --- a/apps/social/urls.py +++ b/apps/social/urls.py @@ -18,6 +18,8 @@ urlpatterns = [ url(r'^unfollow/?$', views.unfollow, name='social-unfollow'), url(r'^approve_follower/?$', views.approve_follower, name='social-approve-follower'), url(r'^ignore_follower/?$', views.ignore_follower, name='social-ignore-follower'), + url(r'^mute_user/?$', views.mute_user, name='social-mute-user'), + url(r'^unmute_user/?$', views.unmute_user, name='social-unmute-user'), url(r'^feed_trainer', views.social_feed_trainer, name='social-feed-trainer'), url(r'^public_comments/?$', views.story_public_comments, name='story-public-comments'), url(r'^save_comment_reply/?$', views.save_comment_reply, name='social-save-comment-reply'), diff --git a/apps/social/views.py b/apps/social/views.py index 03690df1f..28eb35ae9 100644 --- a/apps/social/views.py +++ b/apps/social/views.py @@ -1178,6 +1178,42 @@ def ignore_follower(request): return {'code': code} +@ajax_login_required +@required_params('user_id', method="POST") +@json.json_view +def mute_user(request): + profile = MSocialProfile.get_user(request.user.pk) + muting_user_id = int(request.POST['user_id']) + social_profile = MSocialProfile.get_user(request.user.pk) + muting_profile = MSocialProfile.get_user(muting_user_id) + code = 1 + + logging.user(request, "~FMMuting user ~SB%s" % muting_profile.username) + + social_profile.mute_user(muting_user_id) + + return { + 'code': code, + 'user_profile': social_profile.canonical(), + } + +@ajax_login_required +@required_params('user_id', method="POST") +@json.json_view +def unmute_user(request): + profile = MSocialProfile.get_user(request.user.pk) + muting_user_id = int(request.POST['user_id']) + muting_profile = MSocialProfile.get_user(muting_user_id) + code = 1 + + logging.user(request, "~FM~SBUn-~SN~FMMuting user ~SB%s" % muting_profile.username) + + profile.unmute_user(muting_user_id) + + return { + 'code': code, + 'user_profile': profile.canonical(), + } @required_params('query', method="GET") @json.json_view diff --git a/media/css/reader.css b/media/css/reader.css index cc1e6bb7b..575aa0d35 100644 --- a/media/css/reader.css +++ b/media/css/reader.css @@ -11998,6 +11998,7 @@ form.opml_import_form input { } .NB-profile-badge-actions 
.NB-profile-badge-action-preview, .NB-badge-actions .NB-badge-action-add, +.NB-profile-badge-actions .NB-profile-badge-action-mute, .NB-profile-badge-actions .NB-profile-badge-action-ignore { color: #404040; line-height: 1; diff --git a/media/js/newsblur/common/assetmodel.js b/media/js/newsblur/common/assetmodel.js index cf1c4301d..a4f15e1f1 100644 --- a/media/js/newsblur/common/assetmodel.js +++ b/media/js/newsblur/common/assetmodel.js @@ -1861,6 +1861,20 @@ NEWSBLUR.AssetModel = Backbone.Router.extend({ callback(data); }, this)); }, + + mute_user: function (user_id, callback) { + this.make_request('/social/mute_user', {'user_id': user_id}, _.bind(function(data) { + this.user_profile.set(data.user_profile); + callback(data); + }, this)); + }, + + unmute_user: function (user_id, callback) { + this.make_request('/social/unmute_user', {'user_id': user_id}, _.bind(function(data) { + this.user_profile.set(data.user_profile); + callback(data); + }, this)); + }, approve_follower: function(user_id, callback) { this.make_request('/social/approve_follower', {'user_id': user_id}, _.bind(function(data) { diff --git a/media/js/newsblur/views/profile_badge_view.js b/media/js/newsblur/views/profile_badge_view.js index 29679a1e6..62c2b8792 100644 --- a/media/js/newsblur/views/profile_badge_view.js +++ b/media/js/newsblur/views/profile_badge_view.js @@ -6,6 +6,7 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({ "click .NB-profile-badge-action-follow": "follow_user", "click .NB-profile-badge-action-unfollow": "unfollow_user", "click .NB-profile-badge-action-preview": "preview_user", + "click .NB-profile-badge-action-mute": "mute_user", "click .NB-profile-badge-action-approve": "approve_user", "click .NB-profile-badge-action-ignore": "ignore_user", "click .NB-profile-badge-username": "open_profile", @@ -115,7 +116,10 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({ (!profile.get('private') && $.make('div', { className: 'NB-profile-badge-action-preview NB-modal-submit-button NB-modal-submit-grey ' + (!profile.get('shared_stories_count') ? 'NB-disabled' : '') - }, 'Preview')) + }, 'Preview')), + ($.make('div', { + className: 'NB-profile-badge-action-mute NB-modal-submit-button NB-modal-submit-grey' + }, $.make('span', (profile.get('muted') ? 'Unmute' : 'Mute')))) ]); } else { $actions = $.make('div', { className: 'NB-profile-badge-action-buttons' }, [ @@ -127,7 +131,10 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({ $.make('div', { className: 'NB-profile-badge-action-preview NB-modal-submit-button NB-modal-submit-grey ' + (!profile.get('shared_stories_count') ? 'NB-disabled' : '') - }, 'Preview') + }, 'Preview'), + $.make('div', { + className: 'NB-profile-badge-action-mute NB-modal-submit-button NB-modal-submit-grey ' + }, $.make('span', (profile.get('muted') ? 
'Unmute' : 'Mute')))
             ]);
         }
         this.$('.NB-profile-badge-actions').append($actions);
@@ -206,6 +213,32 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({
         }, this));
     },
     
+    mute_user: function () {
+        if (this.model.get('muted')) {
+            return this.unmute_user();
+        }
+        
+        this.$('.NB-loading').addClass('NB-active');
+        NEWSBLUR.assets.mute_user(this.model.get('user_id'), _.bind(function(data) {
+            this.model.set('muted', true);
+            
+            this.$('.NB-loading').removeClass('NB-active');
+            var $button = this.$('.NB-profile-badge-action-mute');
+            $button.find('span').text('Muted');
+        }, this));
+    },
+    
+    unmute_user: function() {
+        this.$('.NB-loading').addClass('NB-active');
+        NEWSBLUR.assets.unmute_user(this.model.get('user_id'), _.bind(function(data) {
+            this.model.set('muted', false);
+            
+            this.$('.NB-loading').removeClass('NB-active');
+            var $button = this.$('.NB-profile-badge-action-mute');
+            $button.find('span').text('Unmuted');
+        }, this));
+    },
+    
     preview_user: function() {
         if (this.$('.NB-profile-badge-action-preview').hasClass('NB-disabled')) return;
         var open_preview = _.bind(function() {
@@ -220,7 +253,7 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({
             $.modal.close(open_preview);
         }
     },
-    
+
     open_profile: function() {
         var user_id = this.model.get('user_id');
         NEWSBLUR.reader.model.add_user_profiles([this.model]);
@@ -260,4 +293,4 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({
         NEWSBLUR.reader.open_user_admin_modal({user: this.model});
     }
 
-});
\ No newline at end of file
+});
diff --git a/templates/static/api.yml b/templates/static/api.yml
index 949bd98d3..e2fc05ae2 100644
--- a/templates/static/api.yml
+++ b/templates/static/api.yml
@@ -777,6 +777,26 @@
         desc: "ID of user to unfollow."
         required: true
         example: "42"
+  - url: /social/mute_user
+    method: POST
+    short_desc: "Mute a user to not see their shares or replies."
+    long_desc:
+      - "Mute a user to not see their shares or replies."
+    params:
+      - key: user_id
+        desc: "ID of user to mute."
+        required: true
+        example: "42"
+  - url: /social/unmute_user
+    method: POST
+    short_desc: "Un-mute a user."
+    long_desc:
+      - "Un-mute a user to see their shares and replies."
+    params:
+      - key: user_id
+        desc: "ID of user to unmute."
+        required: true
+        example: "42"
   - url: /social/feed_trainer
     method: GET
     short_desc: "Get the intelligence classifiers for a blurblog."

From 205d35b932dd1e7c59830003cce10116929fcf70 Mon Sep 17 00:00:00 2001
From: Samuel Clay
Date: Mon, 25 Oct 2021 19:46:15 -0400
Subject: [PATCH 13/23] Removing unused readability unparseable error.
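For context, the guarded call looks roughly like this (a sketch assuming
the readability-lxml package, whose summary() surfaces lxml's ParserError
on markup it cannot handle; extract_text is a hypothetical wrapper, not
code from this repo):

    from lxml.etree import ParserError
    from readability import Document

    def extract_text(html):
        # summary() returns the cleaned article body; readability's own
        # Unparseable never surfaced here in practice, so only lxml's
        # ParserError is caught.
        try:
            return Document(html).summary(html_partial=True)
        except ParserError:
            return None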
--- apps/rss_feeds/text_importer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/rss_feeds/text_importer.py b/apps/rss_feeds/text_importer.py index 24927d165..ba991800a 100644 --- a/apps/rss_feeds/text_importer.py +++ b/apps/rss_feeds/text_importer.py @@ -137,7 +137,7 @@ class TextImporter: positive_keywords="post, entry, postProp, article, postContent, postField") try: content = original_text_doc.summary(html_partial=True) - except (readability.Unparseable, ParserError) as e: + except (ParserError) as e: logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: %s" % e) return From bf70f2aaf779bc132958302d571909ea53bd6d5c Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Mon, 25 Oct 2021 21:23:53 -0400 Subject: [PATCH 14/23] Redis replica user --- ansible/roles/redis/handlers/main.yml | 5 +++++ ansible/roles/redis/tasks/main.yml | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ansible/roles/redis/handlers/main.yml b/ansible/roles/redis/handlers/main.yml index 4d5df5c2b..d5defa4bb 100644 --- a/ansible/roles/redis/handlers/main.yml +++ b/ansible/roles/redis/handlers/main.yml @@ -7,3 +7,8 @@ name: consul state: reloaded listen: reload consul + +- name: restart redis + become: yes + command: docker restart redis + listen: restart redis diff --git a/ansible/roles/redis/tasks/main.yml b/ansible/roles/redis/tasks/main.yml index 0ac9e33d2..92b9ccbab 100644 --- a/ansible/roles/redis/tasks/main.yml +++ b/ansible/roles/redis/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Template redis_replica.conf file template: - src: /srv/newsblur/docker/redis_replica.conf.j2 + src: /srv/newsblur/docker/redis/redis_replica.conf.j2 dest: /srv/newsblur/docker/redis/redis_replica.conf notify: restart redis register: updated_config @@ -25,6 +25,7 @@ ports: - 6379:6379 restart_policy: unless-stopped + user: 1000:1001 volumes: - /srv/newsblur/docker/volumes/redis:/data - /srv/newsblur/docker/redis/redis.conf:/usr/local/etc/redis/redis_server.conf From c9075cc151bed9255bbe7469139269e8c7b0e3f3 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 26 Oct 2021 08:15:05 -0400 Subject: [PATCH 15/23] Better error handling for mercury reader. --- node/newsblur.coffee | 2 +- node/original_text.coffee | 5 +++++ node/original_text.js | 6 ++++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/node/newsblur.coffee b/node/newsblur.coffee index f9bcf2007..b10883b98 100644 --- a/node/newsblur.coffee +++ b/node/newsblur.coffee @@ -40,7 +40,7 @@ if ENV_PROD app.get "/debug", (req, res) -> throw new Error("Debugging Sentry") - app.use(Sentry.Handlers.errorHandler()) + app.use Sentry.Handlers.errorHandler() log.debug "Setting up Sentry debugging: #{process.env.SENTRY_DSN.substr(0, 20)}..." 
log.debug "Starting NewsBlur Node Server: #{process.env.SERVER_NAME || 'localhost'}" diff --git a/node/original_text.coffee b/node/original_text.coffee index ec7a1b612..0a086af69 100644 --- a/node/original_text.coffee +++ b/node/original_text.coffee @@ -31,5 +31,10 @@ original_text = (app) => Mercury.parse(url).then (result) => log.debug "Fetched: #{url}" res.end JSON.stringify result + .catch (error) => + log.debug "Failed to fetch: #{url}: #{error}" + throw new Error("Failed to fetch: #{url}: #{error}") + return res.end JSON.stringify error: "Failed to fetch #{url}: #{error}" + exports.original_text = original_text diff --git a/node/original_text.js b/node/original_text.js index 8901b3afd..c5a8c6a77 100644 --- a/node/original_text.js +++ b/node/original_text.js @@ -41,6 +41,12 @@ return Mercury.parse(url).then((result) => { log.debug(`Fetched: ${url}`); return res.end(JSON.stringify(result)); + }).catch((error) => { + log.debug(`Failed to fetch: ${url}: ${error}`); + throw new Error(`Failed to fetch: ${url}: ${error}`); + return res.end(JSON.stringify({ + error: `Failed to fetch ${url}: ${error}` + })); }); }); }; From 62a7b1255c435391cfcf35cfc795374b0f813869 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 26 Oct 2021 08:15:11 -0400 Subject: [PATCH 16/23] Skipping mongo1. --- terraform/digitalocean.tf | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/terraform/digitalocean.tf b/terraform/digitalocean.tf index fe43a8d8a..0350f72a2 100644 --- a/terraform/digitalocean.tf +++ b/terraform/digitalocean.tf @@ -386,18 +386,18 @@ resource "digitalocean_droplet" "db-postgres" { } resource "digitalocean_volume" "mongo_volume" { - count = 2 + count = 1 region = "nyc1" - name = "mongo${count.index+1}" + name = "mongo${count.index+2}" size = 400 initial_filesystem_type = "xfs" description = "Storage for NewsBlur MongoDB" } resource "digitalocean_droplet" "db-mongo-primary" { - count = 2 + count = 1 image = var.droplet_os - name = "db-mongo${count.index+1}" + name = "db-mongo${count.index+2}" region = var.droplet_region size = var.mongo_droplet_size ssh_keys = [digitalocean_ssh_key.default.fingerprint] @@ -538,7 +538,7 @@ resource "digitalocean_droplet" "task-work" { image = var.droplet_os name = "task-work${count.index+1}" region = var.droplet_region - size = var.droplet_size + size = var.droplet_size_10 ssh_keys = [digitalocean_ssh_key.default.fingerprint] provisioner "local-exec" { command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120" From d1e37c27c061e1fe5778f685b7cbfd9ba6262843 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 26 Oct 2021 08:22:29 -0400 Subject: [PATCH 17/23] Buffer() to Buffer.from --- node/favicons.coffee | 2 +- node/favicons.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/node/favicons.coffee b/node/favicons.coffee index ae1393e46..2f9e65235 100644 --- a/node/favicons.coffee +++ b/node/favicons.coffee @@ -62,7 +62,7 @@ favicons = (app) => if ENV_DEBUG log.debug "Req: #{feed_id}, etag: #{etag}/#{docs?.color} " + if err then "(err: #{err})" else "" res.header 'etag', docs.color - body = new Buffer(docs.data, 'base64') + body = Buffer.from(docs.data, 'base64') res.set("Content-Type", "image/png") res.status(200).send body else diff --git a/node/favicons.js b/node/favicons.js index 55047a181..f03b3fafe 100644 --- a/node/favicons.js +++ b/node/favicons.js @@ -78,7 +78,7 @@ log.debug(`Req: ${feed_id}, etag: ${etag}/${docs != null ? docs.color : void 0} ` + (err ? 
`(err: ${err})` : "")); } res.header('etag', docs.color); - body = new Buffer(docs.data, 'base64'); + body = Buffer.from(docs.data, 'base64'); res.set("Content-Type", "image/png"); return res.status(200).send(body); } else { From 2c24866fe0f7225f3a30b2dc1a58f3ab9fd0826b Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 26 Oct 2021 08:29:25 -0400 Subject: [PATCH 18/23] Bumping errors from sentry to user. --- node/original_text.coffee | 2 +- node/original_text.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/node/original_text.coffee b/node/original_text.coffee index 0a086af69..3af479e8a 100644 --- a/node/original_text.coffee +++ b/node/original_text.coffee @@ -33,7 +33,7 @@ original_text = (app) => res.end JSON.stringify result .catch (error) => log.debug "Failed to fetch: #{url}: #{error}" - throw new Error("Failed to fetch: #{url}: #{error}") + # throw new Error("Failed to fetch: #{url}: #{error}") return res.end JSON.stringify error: "Failed to fetch #{url}: #{error}" diff --git a/node/original_text.js b/node/original_text.js index c5a8c6a77..c036cf273 100644 --- a/node/original_text.js +++ b/node/original_text.js @@ -43,7 +43,7 @@ return res.end(JSON.stringify(result)); }).catch((error) => { log.debug(`Failed to fetch: ${url}: ${error}`); - throw new Error(`Failed to fetch: ${url}: ${error}`); + // throw new Error("Failed to fetch: #{url}: #{error}") return res.end(JSON.stringify({ error: `Failed to fetch ${url}: ${error}` })); From 81ffcc52dc58380d2c2f5d5efeef700bddc76dba Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 26 Oct 2021 08:33:50 -0400 Subject: [PATCH 19/23] Switching node from .send to .sendStatus. --- node/original_page.coffee | 6 +++--- node/original_page.js | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/node/original_page.coffee b/node/original_page.coffee index 44ff3f812..707bcdf27 100644 --- a/node/original_page.coffee +++ b/node/original_page.coffee @@ -25,12 +25,12 @@ original_page = (app) => log.debug "Loading: #{feedId} (#{filePath}). " + "#{if exists then "" else "NOT FOUND"}" if not exists - return res.send 404 + return res.sendStatus 404 fs.stat filePath, (err, stats) -> if not err and etag and stats.mtime == etag - return res.send 304 + return res.sendStatus 304 if not err and lastModified and stats.mtime == lastModified - return res.send 304 + return res.sendStatus 304 fs.readFile filePath, (err, content) -> res.header 'Etag', Date.parse(stats.mtime) diff --git a/node/original_page.js b/node/original_page.js index 9da74e6c3..35789da05 100644 --- a/node/original_page.js +++ b/node/original_page.js @@ -30,14 +30,14 @@ return fs.exists(filePath, function(exists, err) { log.debug(`Loading: ${feedId} (${filePath}). ` + `${exists ? "" : "NOT FOUND"}`); if (!exists) { - return res.send(404); + return res.sendStatus(404); } return fs.stat(filePath, function(err, stats) { if (!err && etag && stats.mtime === etag) { - return res.send(304); + return res.sendStatus(304); } if (!err && lastModified && stats.mtime === lastModified) { - return res.send(304); + return res.sendStatus(304); } return fs.readFile(filePath, function(err, content) { res.header('Etag', Date.parse(stats.mtime)); From 403ce988e4f465127d7f82c03ad5f5b2138dbd27 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 26 Oct 2021 08:42:49 -0400 Subject: [PATCH 20/23] Adding server name to node.js sentry. 
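The Python services can tag events the same way with sentry-sdk, which
accepts an explicit server name (a sketch assuming the sentry-sdk
package and a SERVER_NAME environment variable; not code from this
repo):

    import os
    import socket

    import sentry_sdk

    # Tag every event with the machine name so errors from task-celery01,
    # task-work03, etc. can be told apart in the Sentry UI.
    sentry_sdk.init(
        dsn=os.environ.get("SENTRY_DSN"),
        traces_sample_rate=1.0,
        server_name=os.environ.get("SERVER_NAME", socket.gethostname()),
    )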
--- node/newsblur.coffee | 2 ++ node/newsblur.js | 7 +++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/node/newsblur.coffee b/node/newsblur.coffee index b10883b98..ad19a347f 100644 --- a/node/newsblur.coffee +++ b/node/newsblur.coffee @@ -3,6 +3,7 @@ Tracing = require "@sentry/tracing" app = require('express')() server = require('http').createServer(app) log = require './log.js' +os = require 'os' envresult = require('dotenv').config({path: 'node/.env'}) if envresult.error # throw envresult.error @@ -28,6 +29,7 @@ if ENV_PROD dsn: process.env.SENTRY_DSN, debug: true, tracesSampleRate: 1.0 + serverName: os.hostname() app.use Sentry.Handlers.requestHandler() diff --git a/node/newsblur.js b/node/newsblur.js index 3ff55dd99..781f235c8 100644 --- a/node/newsblur.js +++ b/node/newsblur.js @@ -1,6 +1,6 @@ // Generated by CoffeeScript 2.5.1 (function() { - var ENV_DEV, ENV_DOCKER, ENV_PROD, Sentry, Tracing, app, envresult, favicons, log, original_page, original_text, server, unread_counts; + var ENV_DEV, ENV_DOCKER, ENV_PROD, Sentry, Tracing, app, envresult, favicons, log, original_page, original_text, os, server, unread_counts; Sentry = require("@sentry/node"); @@ -12,6 +12,8 @@ log = require('./log.js'); + os = require('os'); + envresult = require('dotenv').config({ path: 'node/.env' }); @@ -47,7 +49,8 @@ Sentry.init({ dsn: process.env.SENTRY_DSN, debug: true, - tracesSampleRate: 1.0 + tracesSampleRate: 1.0, + serverName: os.hostname() }); app.use(Sentry.Handlers.requestHandler()); } From 023dd72bc7b18a2974ca56934c04ce3251a803b8 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 26 Oct 2021 08:45:05 -0400 Subject: [PATCH 21/23] Using .env server_name --- node/newsblur.coffee | 3 +-- node/newsblur.js | 6 ++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/node/newsblur.coffee b/node/newsblur.coffee index ad19a347f..e225ac9c7 100644 --- a/node/newsblur.coffee +++ b/node/newsblur.coffee @@ -3,7 +3,6 @@ Tracing = require "@sentry/tracing" app = require('express')() server = require('http').createServer(app) log = require './log.js' -os = require 'os' envresult = require('dotenv').config({path: 'node/.env'}) if envresult.error # throw envresult.error @@ -29,7 +28,7 @@ if ENV_PROD dsn: process.env.SENTRY_DSN, debug: true, tracesSampleRate: 1.0 - serverName: os.hostname() + serverName: process.env.SERVER_NAME app.use Sentry.Handlers.requestHandler() diff --git a/node/newsblur.js b/node/newsblur.js index 781f235c8..341c105fa 100644 --- a/node/newsblur.js +++ b/node/newsblur.js @@ -1,6 +1,6 @@ // Generated by CoffeeScript 2.5.1 (function() { - var ENV_DEV, ENV_DOCKER, ENV_PROD, Sentry, Tracing, app, envresult, favicons, log, original_page, original_text, os, server, unread_counts; + var ENV_DEV, ENV_DOCKER, ENV_PROD, Sentry, Tracing, app, envresult, favicons, log, original_page, original_text, server, unread_counts; Sentry = require("@sentry/node"); @@ -12,8 +12,6 @@ log = require('./log.js'); - os = require('os'); - envresult = require('dotenv').config({ path: 'node/.env' }); @@ -50,7 +48,7 @@ dsn: process.env.SENTRY_DSN, debug: true, tracesSampleRate: 1.0, - serverName: os.hostname() + serverName: process.env.SERVER_NAME }); app.use(Sentry.Handlers.requestHandler()); } From 4ff6df5d3db74a6d1e445c2f537ac4c91c7506e0 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 26 Oct 2021 10:34:36 -0400 Subject: [PATCH 22/23] Removing python2.7 dependency from newsblur docker images. 
--- docker/monitor/Dockerfile | 1 - docker/newsblur_base_image.Dockerfile | 1 - 2 files changed, 2 deletions(-) diff --git a/docker/monitor/Dockerfile b/docker/monitor/Dockerfile index 0be565fe6..ad8709a13 100644 --- a/docker/monitor/Dockerfile +++ b/docker/monitor/Dockerfile @@ -10,7 +10,6 @@ RUN set -ex \ ' \ && buildDeps=' \ patch \ - python-dev \ gfortran \ lib32ncurses5-dev \ libblas-dev \ diff --git a/docker/newsblur_base_image.Dockerfile b/docker/newsblur_base_image.Dockerfile index a25aea238..f75d2c271 100644 --- a/docker/newsblur_base_image.Dockerfile +++ b/docker/newsblur_base_image.Dockerfile @@ -9,7 +9,6 @@ RUN set -ex \ ' \ && buildDeps=' \ patch \ - python-dev \ gfortran \ lib32ncurses5-dev \ libblas-dev \ From 02472c959e5b679861683dd20e5cf93868c87256 Mon Sep 17 00:00:00 2001 From: Samuel Clay Date: Tue, 26 Oct 2021 10:34:43 -0400 Subject: [PATCH 23/23] Elasticsearch should ignore types by default. --- apps/search/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/search/models.py b/apps/search/models.py index 3dc367de7..6f6b39ce9 100644 --- a/apps/search/models.py +++ b/apps/search/models.py @@ -207,7 +207,7 @@ class SearchStory: @classmethod def doc_type(cls): - if settings.DOCKERBUILD or getattr(settings, 'ES_IGNORE_TYPE', False): + if settings.DOCKERBUILD or getattr(settings, 'ES_IGNORE_TYPE', True): return None return "%s-type" % cls.name @@ -450,7 +450,7 @@ class SearchFeed: @classmethod def doc_type(cls): - if settings.DOCKERBUILD or getattr(settings, 'ES_IGNORE_TYPE', False): + if settings.DOCKERBUILD or getattr(settings, 'ES_IGNORE_TYPE', True): return None return "%s-type" % cls.name
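The behavioral change is entirely in the getattr fallback: with the
default flipped to True, a deployment that never defines ES_IGNORE_TYPE
now skips the Elasticsearch mapping type as well. A minimal sketch of
the lookup, with a hypothetical stand-in settings object:

    class Settings:
        DOCKERBUILD = False
        # ES_IGNORE_TYPE deliberately left undefined

    settings = Settings()

    def doc_type(name="stories"):
        # Before this patch: getattr(..., False) kept "<name>-type" for
        # deployments without the setting. After: they get None, i.e.
        # typeless requests, matching Elasticsearch 7+.
        if settings.DOCKERBUILD or getattr(settings, "ES_IGNORE_TYPE", True):
            return None
        return "%s-type" % name

    print(doc_type())  # -> None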