Readying task servers for ec2.

Samuel Clay 2012-10-15 16:32:32 -07:00
parent 967b51c521
commit 24c649df37
10 changed files with 71 additions and 32 deletions

View file

@@ -7,6 +7,6 @@ set eventqueue
     slots 100 # optionally limit the queue size
 # If no feeds have been queued in the last 10 minutes, something is wrong
-check file newsblur.log with path /home/sclay/newsblur/logs/newsblur.log
+check file newsblur.log with path /srv/newsblur/logs/newsblur.log
   if timestamp > 10 minutes then exec "/usr/bin/supervisorctl restart celery"
     as uid root and gid root
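
For reference, a rough Python 2 sketch of what this monit stanza enforces (not part of the commit; the log path and supervisorctl command are taken from the lines above):

import os
import subprocess
import time

LOG = '/srv/newsblur/logs/newsblur.log'   # path from the check above
MAX_AGE = 10 * 60                         # 10 minutes, in seconds

def restart_celery_if_stale():
    # If the log hasn't been written in 10 minutes, assume the workers
    # are wedged and restart them through supervisord.
    if time.time() - os.path.getmtime(LOG) > MAX_AGE:
        subprocess.call(['/usr/bin/supervisorctl', 'restart', 'celery'])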

View file

@@ -7,13 +7,13 @@ set eventqueue
     slots 100 # optionally limit the queue size
 # If no feeds have been fetched in the last 10 minutes, something is wrong
-check file newsblur.log with path /home/sclay/newsblur/logs/newsblur.log
+check file newsblur.log with path /srv/newsblur/logs/newsblur.log
-  if timestamp > 10 minutes then exec "/home/sclay/newsblur/utils/kill_celery.sh"
+  if timestamp > 10 minutes then exec "/srv/newsblur/utils/kill_celery.sh"
-    as uid sclay and gid sclay
+#    as uid sclay and gid sclay
 check system task_server
-  if loadavg (1min) > 12 then exec "/home/sclay/newsblur/utils/kill_celery.sh"
+  if loadavg (1min) > 12 then exec "/srv/newsblur/utils/kill_celery.sh"
-    as uid sclay and gid sclay
+#    as uid sclay and gid sclay
-  if loadavg (5min) > 8 then exec "/home/sclay/newsblur/utils/kill_celery.sh"
+  if loadavg (5min) > 8 then exec "/srv/newsblur/utils/kill_celery.sh"
-    as uid sclay and gid sclay
+#    as uid sclay and gid sclay
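
The system check does the same kind of babysitting on load average. A hedged Python 2 equivalent, with the thresholds and kill script taken from the monit rules above:

import os
import subprocess

KILL_SCRIPT = '/srv/newsblur/utils/kill_celery.sh'  # script referenced by the rules above

def kill_celery_if_overloaded():
    # Mirror the monit thresholds: 1-minute load > 12 or 5-minute load > 8.
    one, five, _fifteen = os.getloadavg()
    if one > 12 or five > 8:
        subprocess.call([KILL_SCRIPT])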

View file

@@ -1,6 +1,6 @@
 [program:celerybeat]
-command=/home/sclay/newsblur/manage.py celerybeat --schedule=/home/sclay/newsblur/data/celerybeat-schedule.db --loglevel=INFO
+command=/srv/newsblur/manage.py celerybeat --schedule=/srv/newsblur/data/celerybeat-schedule.db --loglevel=INFO
-directory=/home/sclay/newsblur
+directory=/srv/newsblur
 user=sclay
 numprocs=1
 stdout_logfile=/var/log/celerybeat.log

View file

@@ -1,6 +1,6 @@
 [program:celery]
-command=/home/sclay/newsblur/manage.py celeryd --loglevel=INFO -Q new_feeds,work_queue,push_feeds,update_feeds
+command=/srv/newsblur/manage.py celeryd --loglevel=INFO -Q new_feeds,work_queue,push_feeds,update_feeds
-directory=/home/sclay/newsblur
+directory=/srv/newsblur
 user=sclay
 numprocs=1
 stdout_logfile=/var/log/celeryd.log

View file

@@ -1,6 +1,6 @@
 [program:celery]
-command=/home/sclay/newsblur/manage.py celeryd --loglevel=INFO -Q beat_tasks -c 1
+command=/srv/newsblur/manage.py celeryd --loglevel=INFO -Q beat_tasks -c 1
-directory=/home/sclay/newsblur
+directory=/srv/newsblur
 user=sclay
 numprocs=1
 stdout_logfile=/var/log/celeryd.log

View file

@@ -1,7 +1,7 @@
 [program:gunicorn]
 #command=/home/conesus/newsblur/manage.py run_gunicorn -c gunicorn_conf.py
 command=gunicorn_django -c config/gunicorn_conf.py
-directory=/home/sclay/newsblur
+directory=/srv/newsblur
 user=sclay
 autostart=true
 autorestart=true

View file

@@ -1,10 +1,10 @@
 [program:node_favicons]
 command=node favicons.js
-directory=/home/sclay/newsblur/node
+directory=/srv/newsblur/node
 user=sclay
 autostart=true
 autorestart=true
 #redirect_stderr=True
 priority=991
 stopsignal=HUP
-stdout_logfile = /home/sclay/newsblur/logs/favicons.log
+stdout_logfile = /srv/newsblur/logs/favicons.log

View file

@@ -1,10 +1,10 @@
 [program:node_unread]
 command=node unread_counts.js
-directory=/home/sclay/newsblur/node
+directory=/srv/newsblur/node
 user=sclay
 autostart=true
 autorestart=true
 #redirect_stderr=True
 priority=991
 stopsignal=HUP
-stdout_logfile = /home/sclay/newsblur/logs/unread_counts.log
+stdout_logfile = /srv/newsblur/logs/unread_counts.log
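
All of the supervisor programs above now run out of /srv/newsblur. A minimal sketch of one way to confirm they came back up after a reread/update, written in the same Python 2 style as the fabfile; it assumes supervisord's inet_http_server is enabled on localhost:9001, which is an assumption and may not match this repo's supervisord.conf:

import xmlrpclib

server = xmlrpclib.ServerProxy('http://localhost:9001/RPC2')
for proc in server.supervisor.getAllProcessInfo():
    print "%-20s %s" % (proc['name'], proc['statename'])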

fabfile.py vendored (57 lines changed)
View file

@@ -1,13 +1,13 @@
 from fabric.api import cd, env, local, parallel, serial
 from fabric.api import put, run, settings, sudo
 # from fabric.colors import red, green, blue, cyan, magenta, white, yellow
-try:
-    from boto.s3.connection import S3Connection
-    from boto.s3.key import Key
-except ImportError:
-    print " ---> Boto not installed yet. No S3 connections available."
+from boto.s3.connection import S3Connection
+from boto.s3.key import Key
+from boto.ec2.connection import EC2Connection
 from fabric.contrib import django
 import os
+import time
+import sys

 django.settings_module('settings')

 try:
@@ -356,6 +356,8 @@ def add_machine_to_ssh():
 def setup_repo():
     with settings(warn_only=True):
         run('git clone https://github.com/samuelclay/NewsBlur.git newsblur')
+    sudo('mkdir -p /srv')
+    sudo('ln -s /home/ubuntu/newsblur /srv/newsblur')

 def setup_repo_local_settings():
     with cd(env.NEWSBLUR_PATH):
@@ -606,10 +608,14 @@ def setup_db_firewall():
     sudo('ufw delete allow from 23.22.0.0/16 to any port 27017') # MongoDB
     sudo('ufw delete allow from 23.22.0.0/16 to any port 6379 ') # Redis
     sudo('ufw delete allow from 23.22.0.0/16 to any port 11211 ') # Memcached
-    sudo('ufw allow from 23.20.0.0/16 to any port 5432 ') # PostgreSQL
+    sudo('ufw delete allow from 23.20.0.0/16 to any port 5432 ') # PostgreSQL
-    sudo('ufw allow from 23.20.0.0/16 to any port 27017') # MongoDB
+    sudo('ufw delete allow from 23.20.0.0/16 to any port 27017') # MongoDB
-    sudo('ufw allow from 23.20.0.0/16 to any port 6379 ') # Redis
+    sudo('ufw delete allow from 23.20.0.0/16 to any port 6379 ') # Redis
-    sudo('ufw allow from 23.20.0.0/16 to any port 11211 ') # Memcached
+    sudo('ufw delete allow from 23.20.0.0/16 to any port 11211 ') # Memcached
+    sudo('ufw allow from 54.242.38.48/20 to any port 5432 ') # PostgreSQL
+    sudo('ufw allow from 54.242.38.48/20 to any port 27017') # MongoDB
+    sudo('ufw allow from 54.242.38.48/20 to any port 6379 ') # Redis
+    sudo('ufw allow from 54.242.38.48/20 to any port 11211 ') # Memcached

     sudo('ufw --force enable')

 def setup_db_motd():
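
One detail worth noting: the new allow rules use 54.242.38.48/20, a /20 written with its host bits set; once the mask is applied it covers 54.242.32.0 through 54.242.47.255. A standalone Python 2 sketch (not part of the fabfile) for double-checking what a CIDR like that actually spans:

import socket
import struct

def cidr_range(cidr):
    # Return the first and last address covered by an a.b.c.d/len expression.
    ip, length = cidr.split('/')
    addr = struct.unpack('!I', socket.inet_aton(ip))[0]
    mask = (0xffffffff << (32 - int(length))) & 0xffffffff
    lo = addr & mask
    hi = lo | (~mask & 0xffffffff)
    to_ip = lambda n: socket.inet_ntoa(struct.pack('!I', n))
    return to_ip(lo), to_ip(hi)

print cidr_range('54.242.38.48/20')  # ('54.242.32.0', '54.242.47.255')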
@@ -731,6 +737,39 @@ def copy_task_settings():
         put('config/settings/task_settings.py', '%s/local_settings.py' % env.NEWSBLUR_PATH)
         run('echo "\nSERVER_NAME = \\\\"`hostname`\\\\"" >> %s/local_settings.py' % env.NEWSBLUR_PATH)

+# ===============
+# = Setup - EC2 =
+# ===============
+
+def setup_ec2_task():
+    AMI_NAME = 'ami-3c994355'       # Ubuntu 64-bit 12.04 LTS
+    # INSTANCE_TYPE = 'c1.medium'
+    INSTANCE_TYPE = 'm1.small'
+    conn = EC2Connection(django_settings.AWS_ACCESS_KEY_ID, django_settings.AWS_SECRET_ACCESS_KEY)
+    reservation = conn.run_instances(AMI_NAME, instance_type=INSTANCE_TYPE,
+                                     key_name='sclay',
+                                     security_groups=['db-mongo'])
+    instance = reservation.instances[0]
+    print "Booting reservation: %s/%s (size: %s)" % (reservation, instance, INSTANCE_TYPE)
+    while True:
+        if instance.state == 'pending':
+            print ".",
+            sys.stdout.flush()
+            instance.update()
+            time.sleep(1)
+        elif instance.state == 'running':
+            print "...booted: %s" % instance.public_dns_name
+            time.sleep(5)
+            break
+        else:
+            print "!!! Error: %s" % instance.state
+            return
+
+    host = instance.public_dns_name
+    env.host_string = host
+    setup_task()
+
 # ==============
 # = Tasks - DB =
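
The polling loop in setup_ec2_task never times out, so an instance stuck in 'pending' would spin forever. A hedged sketch of a bounded variant, reusing the same boto instance object (an illustration, not what the commit does):

import time

def wait_for_instance(instance, timeout=300, poll=2):
    # Return True once EC2 reports 'running', False on timeout or a
    # terminal state such as 'terminated'.
    deadline = time.time() + timeout
    while time.time() < deadline:
        instance.update()
        if instance.state == 'running':
            return True
        if instance.state != 'pending':
            return False
        time.sleep(poll)
    return False

With something like that in place, setup_ec2_task could assign env.host_string only after a successful wait.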

View file

@@ -17,13 +17,13 @@ class NBMuninGraph(MuninGraph):
             'total.draw' : 'LINE1',
         }
         stats = self.stats
-        graph.update(dict((("%s.label" % s['_id'], s['_id']) for s in stats)))
+        graph.update(dict((("%s.label" % s['_id'].replace('-', ''), s['_id']) for s in stats)))
-        graph.update(dict((("%s.draw" % s['_id'], "AREASTACK") for s in stats)))
+        graph.update(dict((("%s.draw" % s['_id'].replace('-', ''), "AREASTACK") for s in stats)))
-        graph['graph_order'] = ' '.join(sorted(s['_id'] for s in stats))
+        graph['graph_order'] = ' '.join(sorted(s['_id'].replace('-', '') for s in stats))
         return graph

     def calculate_metrics(self):
-        servers = dict((("%s" % s['_id'], s['feeds']) for s in self.stats))
+        servers = dict((("%s" % s['_id'].replace('-', ''), s['feeds']) for s in self.stats))
         servers['total'] = self.total[0]['feeds']
         return servers
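
The repeated .replace('-', '') strips hyphens from server IDs before they are used as munin field names, since munin field names are limited to letters, digits and underscores; a hostname like 'db-mongo1' would otherwise produce an invalid field. A more general sanitizer would look something like this (a sketch, not what the plugin ships):

import re

def clean_fieldname(name):
    # Munin field names are limited to [A-Za-z0-9_] and must not start
    # with a digit; this generalizes the simple '-' stripping above.
    name = re.sub(r'[^A-Za-z0-9_]', '_', str(name))
    if name and name[0].isdigit():
        name = '_' + name
    return name

print clean_fieldname('db-mongo1')  # db_mongo1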