Merge branch 'master' into dashboard3

* master:
  Executable monitor work queue.
  Adding monitor for work queue.
  Pointing to the new ssl cert.
  More descriptive failure email
  No default value
  Spitting out error on failure.
  Handling missing values
  Needs redis
  Only warn about falling feed fetches when actually falling.
  Revert "Removing unused types key from index."
This commit is contained in:
Samuel Clay 2021-01-20 14:21:48 -05:00
commit 33508f7361
5 changed files with 80 additions and 11 deletions

View file

@@ -37,6 +37,7 @@ class MUserNotificationTokens(mongo.Document):
'collection': 'notification_tokens',
'indexes': [{'fields': ['user_id'],
'unique': True,
'types': False,
}],
'allow_inheritance': False,
}

View file

@@ -166,7 +166,7 @@ backend maintenance
{{ maintenance }}
listen stats
bind :1936 ssl crt newsblur.pem
bind :1936 ssl crt newsblur.com.crt
stats enable
stats hide-version
stats realm Haproxy\ Statistics

12
fabfile.py vendored
View file

@@ -865,15 +865,14 @@ def copy_certificates():
run('ln -fs %s %s' % (privkey_path, os.path.join(cert_path, 'newsblur.com.crt.key'))) # HAProxy
put(os.path.join(env.SECRETS_PATH, 'certificates/comodo/dhparams.pem'), cert_path)
put(os.path.join(env.SECRETS_PATH, 'certificates/ios/aps_development.pem'), cert_path)
# Export aps.cer from Apple issued certificate using Keychain Assistant
# openssl x509 -in aps.cer -inform DER -outform PEM -out aps.pem
put(os.path.join(env.SECRETS_PATH, 'certificates/ios/aps.pem'), cert_path)
# Export aps.p12 from aps.cer using Keychain Assistant
# openssl pkcs12 -in aps.p12 -out aps.p12.pem -nodes
put(os.path.join(env.SECRETS_PATH, 'certificates/ios/aps.p12.pem'), cert_path)
# run('cat %s/newsblur.com.crt > %s/newsblur.pem' % (cert_path, cert_path))
# run('echo "\n" >> %s/newsblur.pem' % (cert_path))
# run('cat %s/newsblur.com.key >> %s/newsblur.pem' % (cert_path, cert_path))
def setup_certbot():
sudo('snap install --classic certbot')
sudo('snap set certbot trust-plugin-with-root=ok')
@@ -1530,6 +1529,11 @@ def setup_newsletter_monitor():
sudo('ln -fs %s/utils/monitor_newsletter_delivery.py /etc/cron.hourly/monitor_newsletter_delivery' % env.NEWSBLUR_PATH)
sudo('/etc/cron.hourly/monitor_newsletter_delivery')
@parallel
def setup_queue_monitor():
sudo('ln -fs %s/utils/monitor_work_queue.py /etc/cron.hourly/monitor_work_queue' % env.NEWSBLUR_PATH)
sudo('/etc/cron.hourly/monitor_work_queue')
@parallel
def setup_redis_monitor():
run('sleep 5') # Wait for redis to startup so the log file is there

View file

@@ -7,6 +7,7 @@ import subprocess
import requests
from newsblur import settings
import socket
import redis
import pymongo
def main():
@@ -18,25 +19,34 @@ def main():
admin_email = settings.ADMINS[0][1]
failed = False
feeds_fetched = 0
FETCHES_DROP_AMOUNT = 0
redis_task_fetches = 0
monitor_key = "Monitor:task_fetches"
r = redis.Redis(connection_pool=settings.REDIS_ANALYTICS_POOL)
try:
client = pymongo.MongoClient('mongodb://%s' % settings.MONGO_DB['host'])
feeds_fetched = client.newsblur.statistics.find_one({"key": "feeds_fetched"})['value']
redis_task_fetches = int(r.get(monitor_key))
except Exception as e:
failed = e
if feeds_fetched < 5000000:
if feeds_fetched < 5000000 and feeds_fetched <= (redis_task_fetches - FETCHES_DROP_AMOUNT):
failed = True
if failed:
requests.post(
"https://api.mailgun.net/v2/%s/messages" % settings.MAILGUN_SERVER_NAME,
auth=("api", settings.MAILGUN_ACCESS_KEY),
data={"from": "NewsBlur Task Monitor: %s <admin@%s.newsblur.com>" % (hostname, hostname),
"to": [admin_email],
"subject": "%s feeds fetched falling: %s" % (hostname, feeds_fetched),
"text": "Feed fetches are falling: %s" % (feeds_fetched)})
print(" ---> Feeds fetched falling! %s" % (feeds_fetched))
"subject": "%s feeds fetched falling: %s (from %s)" % (hostname, feeds_fetched, redis_task_fetches),
"text": "Feed fetches are falling: %s (from %s)" % (feeds_fetched, redis_task_fetches)})
r.set(monitor_key, feeds_fetched)
r.expire(monitor_key, 60*60*3) # 3 hours
print(" ---> Feeds fetched falling! %s %s" % (feeds_fetched, failed))
else:
print(" ---> Feeds fetched OK: %s" % (feeds_fetched))

54
utils/monitor_work_queue.py Executable file
View file

@@ -0,0 +1,54 @@
#!/srv/newsblur/venv/newsblur/bin/python
import sys
sys.path.append('/srv/newsblur')
import subprocess
import requests
from newsblur import settings
import socket
import redis
import pymongo
def main():
    """Monitor the Redis work queue and email admins when it is backed up.

    Reads the current length of the ``work_queue`` list from the feed-update
    redis and compares it with the baseline recorded on the previous failing
    run (key ``Monitor:work_queue`` in the analytics redis). When the queue is
    both large (>100) and has not dropped below the recorded baseline, a
    warning email is sent through Mailgun and the baseline is refreshed with a
    3-hour expiry.
    """
    hostname = socket.gethostname()
    admin_email = settings.ADMINS[0][1]
    failed = False
    work_queue_size = 0
    # Allow the queue to shrink by this much and still count as "rising".
    QUEUE_DROP_AMOUNT = 0
    redis_work_queue = 0
    monitor_key = "Monitor:work_queue"
    r_monitor = redis.Redis(connection_pool=settings.REDIS_ANALYTICS_POOL)
    r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
    try:
        work_queue_size = int(r.llen("work_queue"))
        # The monitor key is absent on the first run (and after the 3-hour
        # expiry); default to 0 rather than crash on int(None), which would
        # set `failed` and send a spurious alert email.
        redis_work_queue = int(r_monitor.get(monitor_key) or 0)
    except Exception as e:
        # Best-effort monitor: any redis error is itself a reason to alert.
        failed = e

    if work_queue_size > 100 and work_queue_size >= (redis_work_queue - QUEUE_DROP_AMOUNT):
        failed = True

    if failed:
        requests.post(
            "https://api.mailgun.net/v2/%s/messages" % settings.MAILGUN_SERVER_NAME,
            auth=("api", settings.MAILGUN_ACCESS_KEY),
            data={"from": "NewsBlur Queue Monitor: %s <admin@%s.newsblur.com>" % (hostname, hostname),
                  "to": [admin_email],
                  "subject": "%s work queue rising: %s (from %s)" % (hostname, work_queue_size, redis_work_queue),
                  "text": "Work queue is rising: %s (from %s)" % (work_queue_size, redis_work_queue)})
        # Record the current size as the baseline for the next run.
        r_monitor.set(monitor_key, work_queue_size)
        r_monitor.expire(monitor_key, 60*60*3) # 3 hours
        print(" ---> Work queue rising! %s %s" % (work_queue_size, failed))
    else:
        print(" ---> Work queue OK: %s" % (work_queue_size))

if __name__ == '__main__':
    main()