Merge branch 'master' into metrics

Jonathan Math 2021-10-26 09:43:24 -05:00
commit 7e08fc137c
41 changed files with 2178 additions and 769 deletions

.gitignore (vendored): 1 addition

@@ -51,6 +51,7 @@ docker/haproxy/haproxy.consul.cfg
 # docker/haproxy/haproxy.staging.cfg # Staging doesn't use jinja templates, so no need to ignore
 docker/nginx/nginx.consul.conf
 docker/prometheus/prometheus.yml
+docker/redis/redis_replica.conf
 # ----------------------
 # Android


@@ -16,5 +16,5 @@
 - {role: 'consul-client', tags: 'consul'}
 - {role: 'elasticsearch', tags: 'elasticsearch'}
 - {role: 'node-exporter', tags: ['node-exporter', 'metrics']}
-- {role: 'elasticsearch-exporter', tags: ['metrics', elasticsearch]}
+- {role: 'elasticsearch-exporter', tags: ['metrics', 'elasticsearch']}
 - {role: 'monitor', tags: 'monitor'}


@@ -34,9 +34,9 @@
     labels:
       autoheal: "true"
     networks_cli_compatible: yes
-    network_mode: default
-    networks:
-      - name: newsblurnet
+    network_mode: host
+    # networks:
+    #   - name: newsblurnet
     command: "{{ item.command }}"
     log_driver: json-file
     log_options:
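The switch above trades the user-defined newsblurnet bridge for host networking, so the container shares the host's network stack and no network aliases or port mappings apply. A minimal sketch of the same setting via the Docker SDK for Python (the image name is a stand-in):

    import docker  # Docker SDK for Python

    client = docker.from_env()
    # network_mode="host" binds the process directly on the host's interfaces,
    # so no `networks` attachment or port publishing is involved
    client.containers.run("prom/node-exporter:latest", network_mode="host", detach=True)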


@@ -3,7 +3,7 @@
     tags: consul
     become: yes
     template:
-      src: /srv/newsblur/ansible/roles/elasticsearch_exporter/templates/consul_service.json
+      src: /srv/newsblur/ansible/roles/elasticsearch-exporter/templates/consul_service.json
       dest: /etc/consul.d/elasticsearch_exporter.json
     notify:
       - reload consul
@@ -16,7 +16,7 @@
     image: prometheuscommunity/elasticsearch-exporter:latest
     pull: yes
     state: started
-    command: '--es.uri=http://db_elasticsearch:9200'
+    command: '--es.uri=http://elasticsearch:9200'
     hostname: "{{ inventory_hostname }}"
     restart_policy: unless-stopped
     container_default_behavior: no_defaults
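Once running, the exporter polls the Elasticsearch node named by --es.uri and republishes its health as Prometheus metrics, by default on port 9114. A hedged probe of that endpoint in Python (host and metric name assume the exporter's documented defaults):

    from urllib.request import urlopen

    # the exporter serves Prometheus text-format metrics at /metrics on :9114
    with urlopen("http://localhost:9114/metrics") as resp:
        for line in resp.read().decode().splitlines():
            if line.startswith("elasticsearch_cluster_health_status"):
                print(line)  # one gauge per color; the active one reads 1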


@@ -0,0 +1,5 @@
+---
+- name: restart grafana
+  become: yes
+  command: docker restart grafana
+  listen: restart grafana


@@ -3,6 +3,7 @@
   template:
     src: /srv/newsblur/docker/grafana/grafana.ini.j2
     dest: /srv/newsblur/docker/grafana/grafana.ini
+  notify: restart grafana

 - name: Register grafana in consul
   tags: consul
@@ -33,3 +34,10 @@
       - /srv/newsblur/docker/volumes/grafana_data:/var/lib/grafana
       - /srv/newsblur/docker/grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml
       - /srv/newsblur/docker/grafana/dashboards/:/etc/grafana/provisioning/dashboards/
+
+- name: Restart grafana
+  debug:
+    msg: Restarting grafana
+  changed_when: yes
+  notify:
+    - restart grafana


@@ -7,3 +7,8 @@
     name: consul
     state: reloaded
   listen: reload consul
+
+- name: restart redis
+  become: yes
+  command: docker restart redis
+  listen: restart redis


@@ -1,28 +1,45 @@
 ---
+- name: Template redis_replica.conf file
+  template:
+    src: /srv/newsblur/docker/redis/redis_replica.conf.j2
+    dest: /srv/newsblur/docker/redis/redis_replica.conf
+  notify: restart redis
+  register: updated_config
+
 - name: Start redis docker containers
   become: yes
   docker_container:
     name: redis
-    image: redis:6.2.1
+    image: redis:6.2.6
     state: started
+    command: /usr/local/etc/redis/redis_server.conf
     container_default_behavior: no_defaults
     hostname: "{{ inventory_hostname }}"
     networks_cli_compatible: yes
+    # network_mode: host
     network_mode: default
     networks:
       - name: newsblurnet
+        aliases:
+          - redis
+    ports:
+      - 6379:6379
     restart_policy: unless-stopped
+    user: 1000:1001
     volumes:
       - /srv/newsblur/docker/volumes/redis:/data
-      - /srv/newsblur/config/redis.conf:/usr/local/etc/redis_server.conf
-      - /srv/newsblur/config/redis_docker.conf:/etc/redis_server.conf
+      - /srv/newsblur/docker/redis/redis.conf:/usr/local/etc/redis/redis_server.conf
+      - /srv/newsblur/docker/redis/redis_replica.conf:/usr/local/etc/redis/redis_replica.conf
+      - /srv/newsblur/docker/volumes/redis.var.lib:/var/lib/redis

-- name: Register redis in consul
-  tags: consul
-  become: yes
-  template:
-    src: consul_service.json
-    dest: /etc/consul.d/redis.json
-  notify:
-    - reload consul
+# - name: Register redis in consul
+#   become: yes
+#   template:
+#     src: consul_service.json
+#     dest: /etc/consul.d/redis.json
+#   notify:
+#     - reload consul
+#   tags: consul

 - name: Add sanity checkers cronjob for disk usage
   become: yes
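With redis_replica.conf templated in and mounted, a replica's role can be spot-checked from Python; a minimal sketch using redis-py (the hostname is a placeholder):

    import redis

    r = redis.Redis(host="db-redis-story2", port=6379)
    info = r.info("replication")
    print(info["role"])             # "slave" on a replica, "master" on the primary
    print(info.get("master_host"))  # e.g. db-redis-story.service.nyc1.consul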


@@ -137,7 +137,7 @@ class TextImporter:
             positive_keywords="post, entry, postProp, article, postContent, postField")
         try:
             content = original_text_doc.summary(html_partial=True)
-        except (readability.Unparseable, ParserError) as e:
+        except (ParserError) as e:
             logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: %s" % e)
             return
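With the tuple narrowed, readability.Unparseable now propagates instead of being logged and swallowed; only lxml parser failures are caught. A sketch of the call pattern, assuming the readability-lxml and lxml packages (the HTML string is a stand-in):

    from lxml.etree import ParserError
    from readability import Document

    doc = Document("<html><body><p>story text</p></body></html>")
    try:
        content = doc.summary(html_partial=True)  # extract just the article body
    except ParserError:
        content = None  # readability.Unparseable is intentionally no longer caught here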


@@ -207,7 +207,7 @@ class SearchStory:
     @classmethod
     def doc_type(cls):
-        if settings.DOCKERBUILD or getattr(settings, 'ES_IGNORE_TYPE', False):
+        if settings.DOCKERBUILD or getattr(settings, 'ES_IGNORE_TYPE', True):
             return None
         return "%s-type" % cls.name
@@ -450,7 +450,7 @@ class SearchFeed:
     @classmethod
     def doc_type(cls):
-        if settings.DOCKERBUILD or getattr(settings, 'ES_IGNORE_TYPE', False):
+        if settings.DOCKERBUILD or getattr(settings, 'ES_IGNORE_TYPE', True):
             return None
         return "%s-type" % cls.name
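The only behavioral change is the getattr default: deployments that never define ES_IGNORE_TYPE now skip document types, while an explicit False opts back in. The semantics in isolation (SimpleNamespace stands in for Django settings):

    from types import SimpleNamespace

    settings = SimpleNamespace(DOCKERBUILD=False)      # ES_IGNORE_TYPE left undefined
    print(getattr(settings, 'ES_IGNORE_TYPE', True))   # True -> doc_type() returns None
    settings.ES_IGNORE_TYPE = False                    # explicit opt-out restores typed docs
    print(getattr(settings, 'ES_IGNORE_TYPE', True))   # False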


@@ -131,6 +131,8 @@ class MSocialProfile(mongo.Document):
     follower_user_ids = mongo.ListField(mongo.IntField())
     unfollowed_user_ids = mongo.ListField(mongo.IntField())
     requested_follow_user_ids = mongo.ListField(mongo.IntField())
+    muting_user_ids = mongo.ListField(mongo.IntField())
+    muted_by_user_ids = mongo.ListField(mongo.IntField())
     popular_publishers = mongo.StringField()
     stories_last_month = mongo.IntField(default=0)
     average_stories_per_month = mongo.IntField(default=0)
@@ -145,7 +147,16 @@
     meta = {
         'collection': 'social_profile',
-        'indexes': ['user_id', 'username', 'following_user_ids', 'follower_user_ids', 'unfollowed_user_ids', 'requested_follow_user_ids'],
+        'indexes': [
+            'user_id',
+            'username',
+            'following_user_ids',
+            'follower_user_ids',
+            'unfollowed_user_ids',
+            'requested_follow_user_ids',
+            'muting_user_ids',
+            'muted_by_user_ids',
+        ],
         'allow_inheritance': False,
     }
@@ -423,6 +434,7 @@
         if include_following_user != self.user_id:
             params['followed_by_you'] = bool(self.is_followed_by_user(include_following_user))
             params['following_you'] = self.is_following_user(include_following_user)
+            params['muted'] = include_following_user in self.muted_by_user_ids
         return params
@@ -688,7 +700,27 @@
                 email_type='follow_request')
         logging.user(user, "~BB~FM~SBSending email for follow request: %s" % follower_profile.user.username)

+    def mute_user(self, muting_user_id):
+        if muting_user_id not in self.muting_user_ids:
+            self.muting_user_ids.append(muting_user_id)
+            self.save()
+
+        muting_user_profile = MSocialProfile.get_user(muting_user_id)
+        if self.user_id not in muting_user_profile.muted_by_user_ids:
+            muting_user_profile.muted_by_user_ids.append(self.user_id)
+            muting_user_profile.save()
+
+    def unmute_user(self, muting_user_id):
+        if muting_user_id in self.muting_user_ids:
+            self.muting_user_ids.remove(muting_user_id)
+            self.save()
+
+        muting_user_profile = MSocialProfile.get_user(muting_user_id)
+        if self.user_id in muting_user_profile.muted_by_user_ids:
+            muting_user_profile.muted_by_user_ids.remove(self.user_id)
+            muting_user_profile.save()
+
     def save_feed_story_history_statistics(self):
         """
         Fills in missing months between earlier occurrences and now.
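The two new list fields stay symmetric: muting writes the target into the muter's muting_user_ids and the muter into the target's muted_by_user_ids, and unmuting reverses both. A hedged round-trip sketch (user IDs are made up; reload() is mongoengine's document refresh):

    muter = MSocialProfile.get_user(1)
    target = MSocialProfile.get_user(42)

    muter.mute_user(42)
    assert 42 in muter.muting_user_ids
    assert 1 in target.reload().muted_by_user_ids

    muter.unmute_user(42)
    assert 42 not in muter.muting_user_ids
    assert 1 not in target.reload().muted_by_user_ids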
@@ -2038,17 +2070,20 @@ class MSharedStory(mongo.DynamicDocument):
             profile_user_ids = profile_user_ids.union(comments['liking_users'])
         profiles = MSocialProfile.objects.filter(user_id__in=list(profile_user_ids))
-        profiles = [profile.canonical(compact=True) for profile in profiles]

-        # Toss public comments by private profiles
+        # Toss public comments by private profiles and muted users
         profiles_dict = dict((profile['user_id'], profile) for profile in profiles)
         for story in stories:
             commented_by_public = story.get('commented_by_public') or [c['user_id'] for c in story['public_comments']]
-            for user_id in commented_by_public:
-                if profiles_dict[user_id]['private']:
-                    story['public_comments'] = [c for c in story['public_comments'] if c['user_id'] != user_id]
+            for comment_user_id in commented_by_public:
+                private = profiles_dict[comment_user_id].private
+                muted = user_id in profiles_dict[comment_user_id].muted_by_user_ids
+                if private or muted:
+                    story['public_comments'] = [c for c in story['public_comments'] if c['user_id'] != comment_user_id]
                     story['comment_count_public'] -= 1

+        profiles = [profile.canonical(compact=True) for profile in profiles]
         return stories, profiles

     @staticmethod
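Here user_id is the viewer requesting the stories, so a public comment is dropped when its author is private or when the viewer appears in the author's muted_by_user_ids. The same filter as a standalone sketch over made-up data:

    viewer_id = 1
    authors = {
        7: {"private": False, "muted_by_user_ids": [1]},  # viewer 1 muted author 7
        8: {"private": True, "muted_by_user_ids": []},    # private profile
        9: {"private": False, "muted_by_user_ids": []},   # stays visible
    }
    comments = [{"user_id": uid} for uid in (7, 8, 9)]

    visible = [c for c in comments
               if not authors[c["user_id"]]["private"]
               and viewer_id not in authors[c["user_id"]]["muted_by_user_ids"]]
    print([c["user_id"] for c in visible])  # [9]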


@@ -18,6 +18,8 @@ urlpatterns = [
     url(r'^unfollow/?$', views.unfollow, name='social-unfollow'),
     url(r'^approve_follower/?$', views.approve_follower, name='social-approve-follower'),
     url(r'^ignore_follower/?$', views.ignore_follower, name='social-ignore-follower'),
+    url(r'^mute_user/?$', views.mute_user, name='social-mute-user'),
+    url(r'^unmute_user/?$', views.unmute_user, name='social-unmute-user'),
     url(r'^feed_trainer', views.social_feed_trainer, name='social-feed-trainer'),
     url(r'^public_comments/?$', views.story_public_comments, name='story-public-comments'),
     url(r'^save_comment_reply/?$', views.save_comment_reply, name='social-save-comment-reply'),


@@ -1178,6 +1178,42 @@ def ignore_follower(request):

     return {'code': code}

+@ajax_login_required
+@required_params('user_id', method="POST")
+@json.json_view
+def mute_user(request):
+    profile = MSocialProfile.get_user(request.user.pk)
+    muting_user_id = int(request.POST['user_id'])
+    social_profile = MSocialProfile.get_user(request.user.pk)
+    muting_profile = MSocialProfile.get_user(muting_user_id)
+    code = 1
+
+    logging.user(request, "~FMMuting user ~SB%s" % muting_profile.username)
+    social_profile.mute_user(muting_user_id)
+
+    return {
+        'code': code,
+        'user_profile': social_profile.canonical(),
+    }
+
+@ajax_login_required
+@required_params('user_id', method="POST")
+@json.json_view
+def unmute_user(request):
+    profile = MSocialProfile.get_user(request.user.pk)
+    muting_user_id = int(request.POST['user_id'])
+    muting_profile = MSocialProfile.get_user(muting_user_id)
+    code = 1
+
+    logging.user(request, "~FM~SBUn-~SN~FMMuting user ~SB%s" % muting_profile.username)
+    profile.unmute_user(muting_user_id)
+
+    return {
+        'code': code,
+        'user_profile': profile.canonical(),
+    }
+
 @required_params('query', method="GET")
 @json.json_view
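From the client side these are session-authenticated POSTs returning JSON. A hedged example with the requests library (host, credentials, and the /api/login path are placeholders for whatever login flow sets the session cookie):

    import requests

    session = requests.Session()
    session.post("https://newsblur.example.com/api/login",
                 data={"username": "me", "password": "secret"})

    resp = session.post("https://newsblur.example.com/social/mute_user",
                        data={"user_id": 42})
    data = resp.json()
    print(data["code"])          # 1 on success
    print(data["user_profile"])  # the muter's updated canonical profile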


@@ -124,7 +124,7 @@ backend postgres
     option httpchk GET /db_check/postgres
     server postgres-db01 db_pgsql:5000 check inter 2000ms

 backend mongo
-    option httpchk GET /db_check/mongo?haproxy=1
+    option httpchk GET /db_check/mongo
     server mongo-db22 db_mongo:5000 check inter 2000ms

 backend redis
     option httpchk GET /db_check/redis


@@ -146,7 +146,7 @@ backend postgres
     server postgres-db02 db_pgsql:5000 check inter 2000ms

 backend mongo
-    option httpchk GET /db_check/mongo?haproxy=1
+    option httpchk GET /db_check/mongo
     server mongo-db20d db20d:5000 check inter 2000ms
     server mongo-db22 db22:5000 check inter 2000ms
     server mongo-db23a db23a:5000 check inter 2000ms


@@ -1,68 +0,0 @@
### BEGIN INIT INFO
# Provides: redis-server
# Required-Start: $syslog $remote_fs
# Required-Stop: $syslog $remote_fs
# Should-Start: $local_fs
# Should-Stop: $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: redis-server - Persistent key-value db
# Description: redis-server - Persistent key-value db
### END INIT INFO
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
DAEMON=/usr/local/bin/redis-server
DAEMON_ARGS=/etc/redis.conf
NAME=redis-server
DESC=redis-server
PIDFILE=/var/run/redis.pid
test -x $DAEMON || exit 0
set -e
case "$1" in
start)
echo -n "Starting $DESC: "
touch $PIDFILE
if start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- $DAEMON_ARGS
then
echo "$NAME."
else
echo "failed"
fi
;;
stop)
echo -n "Stopping $DESC: "
if start-stop-daemon --stop --retry 10 --quiet --oknodo --pidfile $PIDFILE --exec $DAEMON
then
echo "$NAME."
else
echo "failed"
fi
rm -f $PIDFILE
;;
restart|force-reload)
${0} stop
${0} start
;;
status)
echo -n "$DESC is "
if start-stop-daemon --stop --quiet --signal 0 --name ${NAME} --pidfile ${PIDFILE}
then
echo "running"
else
echo "not running"
exit 1
fi
;;
*)
echo "Usage: /etc/init.d/$NAME {start|stop|restart|force-reload}" >&2
exit 1
;;
esac
exit 0


@@ -1,599 +0,0 @@
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize no
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
#pidfile /var/run/redis.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379
# If you want you can bind a single interface, if the bind option is not
# specified all the interfaces will listen for incoming connections.
#
# bind 127.0.0.1
# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 755
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
# logfile stdout
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in an hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usually even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error no
# Compress string objects using LZF when dump .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
protected-mode no
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only no
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout for both Bulk transfer I/O timeout and
# master data or ping response timeout. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
repl-timeout 600
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
maxclients 100000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# accordingly to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# an hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are not suitable keys for eviction.
#
# At the date of writing this commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can select as well the sample
# size to check. For instance for default Redis will check three keys and
# pick the one that was used less recently, you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result into a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead to wait for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# More details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceed the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet called write commands. The second
# is the only way to shut down the server in the case a write commands was
# already issue by the script but the user don't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 1000
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 1000000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 1024
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happens to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operation you run into an hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# active rehashing the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply form time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients
# slave -> slave clients and MONITOR clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reach 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 1024mb 512mb 360
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform accordingly to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis server but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf
#include /etc/redis/redis_server.conf


@@ -1,8 +0,0 @@
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
echo never > /sys/kernel/mm/transparent_hugepage/defrag
fi
exit 0;


@@ -1 +0,0 @@
slaveof db_redis 6379


@@ -93,7 +93,7 @@ services:
       - ./docker/volumes/postgres:/var/lib/postgresql/data

   db_redis:
-    image: redis:3.2.6
+    image: redis:latest
     ports:
       - 6579:6579
     container_name: db_redis


@@ -167,7 +167,7 @@ backend postgres
     server db-postgres db-postgres.node.nyc1.consul:5579 check inter 2000ms resolvers consul resolve-prefer ipv4 resolve-opts allow-dup-ip init-addr none

 backend mongo
-    option httpchk GET /db_check/mongo?haproxy=1
+    option httpchk GET /db_check/mongo
     default-server check inter 2000ms resolvers consul resolve-prefer ipv4 resolve-opts allow-dup-ip init-addr none
 {% for host in groups.mongo %}
     server {{host}} {{host}}.node.nyc1.consul:5579


@@ -10,7 +10,6 @@ RUN set -ex \
     ' \
     && buildDeps=' \
         patch \
-        python-dev \
         gfortran \
         lib32ncurses5-dev \
         libblas-dev \

@@ -9,7 +9,6 @@ RUN set -ex \
     ' \
     && buildDeps=' \
         patch \
-        python-dev \
         gfortran \
         lib32ncurses5-dev \
         libblas-dev \

docker/redis/redis.conf (new file): 1879 additions

File diff suppressed because it is too large.


@@ -0,0 +1 @@
+replicaof {{ inventory_hostname|regex_replace('\d+', '') }}.service.nyc1.consul 6379
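The regex_replace filter strips every digit run from the droplet's hostname, so each numbered replica points at its primary's Consul service name. The same transform in plain Python (hostnames are illustrative):

    import re

    for hostname in ("db-redis-user2", "db-redis-story3"):
        primary = re.sub(r"\d+", "", hostname) + ".service.nyc1.consul"
        print(primary)  # db-redis-user.service.nyc1.consul, db-redis-story.service.nyc1.consul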


@@ -84,39 +84,41 @@ def db_check_mongo():
         return str(1)
     except:
         abort(503)

-    if request.args.get('haproxy') == '1':
-        try:
-            stories = db.stories.estimated_document_count()
-        except (pymongo.errors.NotMasterError, pymongo.errors.ServerSelectionTimeoutError):
-            abort(504)
-        except pymongo.errors.OperationFailure as e:
-            if 'Authentication failed' in str(e):
-                abort(505)
-        if not stories:
-            abort(510)
-        status = client.admin.command('replSetGetStatus')
-        members = status['members']
-        primary_optime = None
-        oldest_secondary_optime = None
-        for member in members:
-            member_state = member['state']
-            optime = member['optime']
-            if member_state == PRIMARY_STATE:
-                primary_optime = optime['ts'].time
-            elif member_state == SECONDARY_STATE:
-                if not oldest_secondary_optime or optime['ts'].time < oldest_secondary_optime:
-                    oldest_secondary_optime = optime['ts'].time
-        if not primary_optime or not oldest_secondary_optime:
-            abort(511)
-        # if primary_optime - oldest_secondary_optime > 100:
-        #     abort(512)
-        return str(stories)
+    try:
+        stories = db.stories.estimated_document_count()
+    except pymongo.errors.NotMasterError:
+        abort(504)
+    except pymongo.errors.ServerSelectionTimeoutError:
+        abort(505)
+    except pymongo.errors.OperationFailure as e:
+        if 'Authentication failed' in str(e):
+            abort(506)
+        abort(507)
+    if not stories:
+        abort(510)
+    status = client.admin.command('replSetGetStatus')
+    members = status['members']
+    primary_optime = None
+    oldest_secondary_optime = None
+    for member in members:
+        member_state = member['state']
+        optime = member['optime']
+        if member_state == PRIMARY_STATE:
+            primary_optime = optime['ts'].time
+        elif member_state == SECONDARY_STATE:
+            if not oldest_secondary_optime or optime['ts'].time < oldest_secondary_optime:
+                oldest_secondary_optime = optime['ts'].time
+    if not primary_optime or not oldest_secondary_optime:
+        abort(511)
+    # if primary_optime - oldest_secondary_optime > 100:
+    #     abort(512)
     return str(stories)

 @app.route("/db_check/mongo_analytics")
 def db_check_mongo_analytics():

@@ -11998,6 +11998,7 @@ form.opml_import_form input {
 }

 .NB-profile-badge-actions .NB-profile-badge-action-preview,
 .NB-badge-actions .NB-badge-action-add,
+.NB-profile-badge-actions .NB-profile-badge-action-mute,
 .NB-profile-badge-actions .NB-profile-badge-action-ignore {
     color: #404040;
     line-height: 1;


@@ -1861,6 +1861,20 @@ NEWSBLUR.AssetModel = Backbone.Router.extend({
                 callback(data);
             }, this));
         },

+        mute_user: function (user_id, callback) {
+            this.make_request('/social/mute_user', {'user_id': user_id}, _.bind(function(data) {
+                this.user_profile.set(data.user_profile);
+                callback(data);
+            }, this));
+        },
+
+        unmute_user: function (user_id, callback) {
+            this.make_request('/social/unmute_user', {'user_id': user_id}, _.bind(function(data) {
+                this.user_profile.set(data.user_profile);
+                callback(data);
+            }, this));
+        },
+
         approve_follower: function(user_id, callback) {
             this.make_request('/social/approve_follower', {'user_id': user_id}, _.bind(function(data) {


@@ -6,6 +6,7 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({
         "click .NB-profile-badge-action-follow": "follow_user",
         "click .NB-profile-badge-action-unfollow": "unfollow_user",
         "click .NB-profile-badge-action-preview": "preview_user",
+        "click .NB-profile-badge-action-mute": "mute_user",
         "click .NB-profile-badge-action-approve": "approve_user",
         "click .NB-profile-badge-action-ignore": "ignore_user",
         "click .NB-profile-badge-username": "open_profile",
@@ -115,7 +116,10 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({
                 (!profile.get('private') && $.make('div', {
                     className: 'NB-profile-badge-action-preview NB-modal-submit-button NB-modal-submit-grey ' +
                                (!profile.get('shared_stories_count') ? 'NB-disabled' : '')
-                }, 'Preview'))
+                }, 'Preview')),
+                ($.make('div', {
+                    className: 'NB-profile-badge-action-mute NB-modal-submit-button NB-modal-submit-grey'
+                }, $.make('span', (profile.get('muted') ? 'Unmute' : 'Mute'))))
             ]);
         } else {
             $actions = $.make('div', { className: 'NB-profile-badge-action-buttons' }, [
@@ -127,7 +131,10 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({
                 $.make('div', {
                     className: 'NB-profile-badge-action-preview NB-modal-submit-button NB-modal-submit-grey ' +
                                (!profile.get('shared_stories_count') ? 'NB-disabled' : '')
-                }, 'Preview')
+                }, 'Preview'),
+                $.make('div', {
+                    className: 'NB-profile-badge-action-mute NB-modal-submit-button NB-modal-submit-grey '
+                }, $.make('span', (profile.get('muted') ? 'Unmute' : 'Mute')))
             ]);
         }
         this.$('.NB-profile-badge-actions').append($actions);
@@ -206,6 +213,32 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({
             }, this));
         },

+        mute_user: function () {
+            if (this.model.get('muted')) {
+                return this.unmute_user();
+            }
+            this.$('.NB-loading').addClass('NB-active');
+            NEWSBLUR.assets.mute_user(this.model.get('user_id'), _.bind(function(data) {
+                this.model.set('muted', true);
+                this.$('.NB-loading').removeClass('NB-active');
+                var $button = this.$('.NB-profile-badge-action-mute');
+                $button.find('span').text('Muted');
+            }, this));
+        },
+
+        unmute_user: function() {
+            this.$('.NB-loading').addClass('NB-active');
+            NEWSBLUR.assets.unmute_user(this.model.get('user_id'), _.bind(function(data) {
+                this.model.set('muted', false);
+                this.$('.NB-loading').removeClass('NB-active');
+                var $button = this.$('.NB-profile-badge-action-mute');
+                $button.find('span').text('Unmuted');
+            }, this));
+        },
+
         preview_user: function() {
             if (this.$('.NB-profile-badge-action-preview').hasClass('NB-disabled')) return;
             var open_preview = _.bind(function() {
@@ -220,7 +253,7 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({
$.modal.close(open_preview);
}
},
open_profile: function() {
var user_id = this.model.get('user_id');
NEWSBLUR.reader.model.add_user_profiles([this.model]);
@@ -260,4 +293,4 @@ NEWSBLUR.Views.SocialProfileBadge = Backbone.View.extend({
NEWSBLUR.reader.open_user_admin_modal({user: this.model});
}
});
});


@@ -62,7 +62,7 @@ favicons = (app) =>
             if ENV_DEBUG
                 log.debug "Req: #{feed_id}, etag: #{etag}/#{docs?.color} " + if err then "(err: #{err})" else ""
             res.header 'etag', docs.color
-            body = new Buffer(docs.data, 'base64')
+            body = Buffer.from(docs.data, 'base64')
             res.set("Content-Type", "image/png")
             res.status(200).send body
         else


@@ -78,7 +78,7 @@
                 log.debug(`Req: ${feed_id}, etag: ${etag}/${docs != null ? docs.color : void 0} ` + (err ? `(err: ${err})` : ""));
             }
             res.header('etag', docs.color);
-            body = new Buffer(docs.data, 'base64');
+            body = Buffer.from(docs.data, 'base64');
             res.set("Content-Type", "image/png");
             return res.status(200).send(body);
         } else {


@@ -8,7 +8,8 @@ if envresult.error
     # throw envresult.error
     envresult = require('dotenv').config()
     if envresult.error
-        throw envresult.error
+        log.debug " ---> No .env file found, using defaults"
+        # throw envresult.error

 ENV_DEV = process.env.NODE_ENV == 'development'
 ENV_PROD = process.env.NODE_ENV == 'production'
@@ -27,19 +28,20 @@ if ENV_PROD
         dsn: process.env.SENTRY_DSN,
         debug: true,
         tracesSampleRate: 1.0
+        serverName: process.env.SERVER_NAME
-    app.use(Sentry.Handlers.requestHandler())
+    app.use Sentry.Handlers.requestHandler()

-original_page(app)
-original_text(app)
-favicons(app)
-unread_counts(server)
+original_page app
+original_text app
+favicons app
+unread_counts server

 if ENV_PROD
     app.get "/debug", (req, res) ->
         throw new Error("Debugging Sentry")
-    app.use(Sentry.Handlers.errorHandler())
+    app.use Sentry.Handlers.errorHandler()
     log.debug "Setting up Sentry debugging: #{process.env.SENTRY_DSN.substr(0, 20)}..."

 log.debug "Starting NewsBlur Node Server: #{process.env.SERVER_NAME || 'localhost'}"


@@ -20,11 +20,11 @@
     // throw envresult.error
     envresult = require('dotenv').config();
     if (envresult.error) {
-      log.debug(`There is no .env file. Continuing...`);
       //throw envresult.error;
+      log.debug(" ---> No .env file found, using defaults");
     }
   }
   // throw envresult.error

 ENV_DEV = process.env.NODE_ENV === 'development';
 ENV_PROD = process.env.NODE_ENV === 'production';
@@ -47,7 +47,8 @@
   Sentry.init({
     dsn: process.env.SENTRY_DSN,
     debug: true,
-    tracesSampleRate: 1.0
+    tracesSampleRate: 1.0,
+    serverName: process.env.SERVER_NAME
   });
   app.use(Sentry.Handlers.requestHandler());
 }


@@ -25,12 +25,12 @@ original_page = (app) =>
             log.debug "Loading: #{feedId} (#{filePath}). " +
                       "#{if exists then "" else "NOT FOUND"}"
             if not exists
-                return res.send 404
+                return res.sendStatus 404
             fs.stat filePath, (err, stats) ->
                 if not err and etag and stats.mtime == etag
-                    return res.send 304
+                    return res.sendStatus 304
                 if not err and lastModified and stats.mtime == lastModified
-                    return res.send 304
+                    return res.sendStatus 304
                 fs.readFile filePath, (err, content) ->
                     res.header 'Etag', Date.parse(stats.mtime)


@@ -30,14 +30,14 @@
     return fs.exists(filePath, function(exists, err) {
       log.debug(`Loading: ${feedId} (${filePath}). ` + `${exists ? "" : "NOT FOUND"}`);
       if (!exists) {
-        return res.send(404);
+        return res.sendStatus(404);
       }
       return fs.stat(filePath, function(err, stats) {
         if (!err && etag && stats.mtime === etag) {
-          return res.send(304);
+          return res.sendStatus(304);
         }
         if (!err && lastModified && stats.mtime === lastModified) {
-          return res.send(304);
+          return res.sendStatus(304);
         }
         return fs.readFile(filePath, function(err, content) {
           res.header('Etag', Date.parse(stats.mtime));


@@ -31,5 +31,10 @@ original_text = (app) =>
         Mercury.parse(url).then (result) =>
             log.debug "Fetched: #{url}"
             res.end JSON.stringify result
+        .catch (error) =>
+            log.debug "Failed to fetch: #{url}: #{error}"
+            # throw new Error("Failed to fetch: #{url}: #{error}")
+            return res.end JSON.stringify error: "Failed to fetch #{url}: #{error}"

 exports.original_text = original_text


@@ -41,6 +41,12 @@
     return Mercury.parse(url).then((result) => {
       log.debug(`Fetched: ${url}`);
       return res.end(JSON.stringify(result));
+    }).catch((error) => {
+      log.debug(`Failed to fetch: ${url}: ${error}`);
+      // throw new Error("Failed to fetch: #{url}: #{error}")
+      return res.end(JSON.stringify({
+        error: `Failed to fetch ${url}: ${error}`
+      }));
     });
   });
 };


@@ -777,6 +777,26 @@
           desc: "ID of user to unfollow."
           required: true
           example: "42"

+    - url: /social/mute_user
+      method: POST
+      short_desc: "Mute a user to not see their shares or replies."
+      long_desc:
+        - "Mute a user to not see their shares or replies."
+      params:
+        - key: user_id
+          desc: "ID of user to mute."
+          required: true
+          example: "42"
+
+    - url: /social/unmute_user
+      method: POST
+      short_desc: "Un-mute a user"
+      long_desc:
+        - "Un-mute a user to see their shares and replies."
+      params:
+        - key: user_id
+          desc: "ID of user to unmute."
+          required: true
+          example: "42"
+
     - url: /social/feed_trainer
       method: GET
       short_desc: "Get the intelligence classifiers for a blurblog."


@@ -304,7 +304,7 @@ resource "digitalocean_droplet" "db-redis-user" {
   image    = var.droplet_os
   name     = "db-redis-user"
   region   = var.droplet_region
-  size     = var.droplet_size
+  size     = var.droplet_size_40
   ssh_keys = [digitalocean_ssh_key.default.fingerprint]
   provisioner "local-exec" {
     command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120"
@@ -321,7 +321,7 @@ resource "digitalocean_droplet" "db-redis-sessions" {
   image    = var.droplet_os
   name     = "db-redis-sessions"
   region   = var.droplet_region
-  size     = var.droplet_size
+  size     = var.droplet_size_20
   ssh_keys = [digitalocean_ssh_key.default.fingerprint]
   provisioner "local-exec" {
     command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120"
@@ -338,7 +338,7 @@ resource "digitalocean_droplet" "db-redis-story" {
   image    = var.droplet_os
   name     = "db-redis-story"
   region   = var.droplet_region
-  size     = var.droplet_size
+  size     = var.redis_story_droplet_size
   ssh_keys = [digitalocean_ssh_key.default.fingerprint]
   provisioner "local-exec" {
     command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120"
@@ -386,18 +386,18 @@ resource "digitalocean_droplet" "db-postgres" {
 }

 resource "digitalocean_volume" "mongo_volume" {
-  count                   = 2
+  count                   = 1
   region                  = "nyc1"
-  name                    = "mongo${count.index+1}"
+  name                    = "mongo${count.index+2}"
   size                    = 400
   initial_filesystem_type = "xfs"
   description             = "Storage for NewsBlur MongoDB"
 }

 resource "digitalocean_droplet" "db-mongo-primary" {
-  count    = 2
+  count    = 1
   image    = var.droplet_os
-  name     = "db-mongo${count.index+1}"
+  name     = "db-mongo${count.index+2}"
   region   = var.droplet_region
   size     = var.mongo_droplet_size
   ssh_keys = [digitalocean_ssh_key.default.fingerprint]
@@ -534,11 +534,11 @@ resource "digitalocean_droplet" "task-celery" {
 }

 resource "digitalocean_droplet" "task-work" {
-  count    = 2
+  count    = 3
   image    = var.droplet_os
   name     = "task-work${count.index+1}"
   region   = var.droplet_region
-  size     = var.droplet_size
+  size     = var.droplet_size_10
   ssh_keys = [digitalocean_ssh_key.default.fingerprint]
   provisioner "local-exec" {
     command = "/srv/newsblur/ansible/utils/generate_inventory.py; sleep 120"

@@ -9,6 +9,10 @@ variable "droplet_size" {
   type    = string
   default = "s-1vcpu-1gb"
 }

+variable "droplet_size_10" {
+  type    = string
+  default = "s-1vcpu-2gb"
+}
+
 variable "droplet_size_20" {
   type    = string
@@ -20,6 +24,11 @@ variable "droplet_size_120" {
   default = "g-8vcpu-32gb"
 }

+variable "droplet_size_40" {
+  type    = string
+  default = "s-4vcpu-8gb"
+}
+
 variable "droplet_os" {
   type    = string
   default = "ubuntu-20-04-x64"
@@ -55,3 +64,8 @@ variable "elasticsearch_droplet_size" {
   type    = string
   default = "m3-2vcpu-16gb"
 }
+
+variable "redis_story_droplet_size" {
+  type    = string
+  default = "m-2vcpu-16gb"
+}