From 24c649df376474aaf6a70af8327e2b17d48c6897 Mon Sep 17 00:00:00 2001
From: Samuel Clay
Date: Mon, 15 Oct 2012 16:32:32 -0700
Subject: [PATCH] Readying task servers for ec2.

---
 config/monit_db.conf                  |  2 +-
 config/monit_task.conf                | 14 +++---
 config/supervisor_celerybeat.conf     |  4 +-
 config/supervisor_celeryd.conf        |  4 +-
 config/supervisor_celeryd_beat.conf   |  4 +-
 config/supervisor_gunicorn.conf       |  2 +-
 config/supervisor_node_favicons.conf  |  4 +-
 config/supervisor_node_unread.conf    |  4 +-
 fabfile.py                            | 57 ++++++++++++++++++++++-----
 utils/munin/newsblur_tasks_servers.py |  8 ++--
 10 files changed, 71 insertions(+), 32 deletions(-)

diff --git a/config/monit_db.conf b/config/monit_db.conf
index e5f75522e..2712e8ee4 100644
--- a/config/monit_db.conf
+++ b/config/monit_db.conf
@@ -7,6 +7,6 @@ set eventqueue
     slots 100 # optionally limit the queue size
 
 # If no feeds have been queued in the last 10 minutes, something is wrong
-check file newsblur.log with path /home/sclay/newsblur/logs/newsblur.log
+check file newsblur.log with path /srv/newsblur/logs/newsblur.log
   if timestamp > 10 minutes then exec "/usr/bin/supervisorctl restart celery"
      as uid root and gid root
\ No newline at end of file
diff --git a/config/monit_task.conf b/config/monit_task.conf
index 9d4167cfe..e67b16083 100644
--- a/config/monit_task.conf
+++ b/config/monit_task.conf
@@ -7,13 +7,13 @@ set eventqueue
     slots 100 # optionally limit the queue size
 
 # If no feeds have been fetched in the last 10 minutes, something is wrong
-check file newsblur.log with path /home/sclay/newsblur/logs/newsblur.log
-  if timestamp > 10 minutes then exec "/home/sclay/newsblur/utils/kill_celery.sh"
-     as uid sclay and gid sclay
+check file newsblur.log with path /srv/newsblur/logs/newsblur.log
+  if timestamp > 10 minutes then exec "/srv/newsblur/utils/kill_celery.sh"
+     # as uid sclay and gid sclay
 
 check system task_server
-  if loadavg (1min) > 12 then exec "/home/sclay/newsblur/utils/kill_celery.sh"
-     as uid sclay and gid sclay
-  if loadavg (5min) > 8 then exec "/home/sclay/newsblur/utils/kill_celery.sh"
-     as uid sclay and gid sclay
+  if loadavg (1min) > 12 then exec "/srv/newsblur/utils/kill_celery.sh"
+     # as uid sclay and gid sclay
+  if loadavg (5min) > 8 then exec "/srv/newsblur/utils/kill_celery.sh"
+     # as uid sclay and gid sclay
diff --git a/config/supervisor_celerybeat.conf b/config/supervisor_celerybeat.conf
index 9d7947232..4200d224c 100644
--- a/config/supervisor_celerybeat.conf
+++ b/config/supervisor_celerybeat.conf
@@ -1,6 +1,6 @@
 [program:celerybeat]
-command=/home/sclay/newsblur/manage.py celerybeat --schedule=/home/sclay/newsblur/data/celerybeat-schedule.db --loglevel=INFO
-directory=/home/sclay/newsblur
+command=/srv/newsblur/manage.py celerybeat --schedule=/srv/newsblur/data/celerybeat-schedule.db --loglevel=INFO
+directory=/srv/newsblur
 user=sclay
 numprocs=1
 stdout_logfile=/var/log/celerybeat.log
diff --git a/config/supervisor_celeryd.conf b/config/supervisor_celeryd.conf
index 19c6d9aef..91dcb72ec 100644
--- a/config/supervisor_celeryd.conf
+++ b/config/supervisor_celeryd.conf
@@ -1,6 +1,6 @@
 [program:celery]
-command=/home/sclay/newsblur/manage.py celeryd --loglevel=INFO -Q new_feeds,work_queue,push_feeds,update_feeds
-directory=/home/sclay/newsblur
+command=/srv/newsblur/manage.py celeryd --loglevel=INFO -Q new_feeds,work_queue,push_feeds,update_feeds
+directory=/srv/newsblur
 user=sclay
 numprocs=1
 stdout_logfile=/var/log/celeryd.log
diff --git a/config/supervisor_celeryd_beat.conf b/config/supervisor_celeryd_beat.conf
index e3e433203..bab401ca3 100644
--- a/config/supervisor_celeryd_beat.conf
+++ b/config/supervisor_celeryd_beat.conf
@@ -1,6 +1,6 @@
 [program:celery]
-command=/home/sclay/newsblur/manage.py celeryd --loglevel=INFO -Q beat_tasks -c 1
-directory=/home/sclay/newsblur
+command=/srv/newsblur/manage.py celeryd --loglevel=INFO -Q beat_tasks -c 1
+directory=/srv/newsblur
 user=sclay
 numprocs=1
 stdout_logfile=/var/log/celeryd.log
diff --git a/config/supervisor_gunicorn.conf b/config/supervisor_gunicorn.conf
index 480c108e2..db68baabc 100644
--- a/config/supervisor_gunicorn.conf
+++ b/config/supervisor_gunicorn.conf
@@ -1,7 +1,7 @@
 [program:gunicorn]
 #command=/home/conesus/newsblur/manage.py run_gunicorn -c gunicorn_conf.py
 command=gunicorn_django -c config/gunicorn_conf.py
-directory=/home/sclay/newsblur
+directory=/srv/newsblur
 user=sclay
 autostart=true
 autorestart=true
diff --git a/config/supervisor_node_favicons.conf b/config/supervisor_node_favicons.conf
index 6cc97b309..402c3f088 100644
--- a/config/supervisor_node_favicons.conf
+++ b/config/supervisor_node_favicons.conf
@@ -1,10 +1,10 @@
 [program:node_favicons]
 command=node favicons.js
-directory=/home/sclay/newsblur/node
+directory=/srv/newsblur/node
 user=sclay
 autostart=true
 autorestart=true
 #redirect_stderr=True
 priority=991
 stopsignal=HUP
-stdout_logfile = /home/sclay/newsblur/logs/favicons.log
+stdout_logfile = /srv/newsblur/logs/favicons.log
diff --git a/config/supervisor_node_unread.conf b/config/supervisor_node_unread.conf
index 0726236bc..c6dbcdb71 100644
--- a/config/supervisor_node_unread.conf
+++ b/config/supervisor_node_unread.conf
@@ -1,10 +1,10 @@
 [program:node_unread]
 command=node unread_counts.js
-directory=/home/sclay/newsblur/node
+directory=/srv/newsblur/node
 user=sclay
 autostart=true
 autorestart=true
 #redirect_stderr=True
 priority=991
 stopsignal=HUP
-stdout_logfile = /home/sclay/newsblur/logs/unread_counts.log
+stdout_logfile = /srv/newsblur/logs/unread_counts.log
diff --git a/fabfile.py b/fabfile.py
index bb5da67c8..e6780dcae 100644
--- a/fabfile.py
+++ b/fabfile.py
@@ -1,13 +1,13 @@
 from fabric.api import cd, env, local, parallel, serial
 from fabric.api import put, run, settings, sudo
 # from fabric.colors import red, green, blue, cyan, magenta, white, yellow
-try:
-    from boto.s3.connection import S3Connection
-    from boto.s3.key import Key
-except ImportError:
-    print " ---> Boto not installed yet. No S3 connections available."
+from boto.s3.connection import S3Connection
+from boto.s3.key import Key
+from boto.ec2.connection import EC2Connection
 from fabric.contrib import django
 import os
+import time
+import sys
 
 django.settings_module('settings')
 try:
@@ -356,6 +356,8 @@ def add_machine_to_ssh():
 def setup_repo():
     with settings(warn_only=True):
         run('git clone https://github.com/samuelclay/NewsBlur.git newsblur')
+    sudo('mkdir -p /srv')
+    sudo('ln -s /home/ubuntu/newsblur /srv/newsblur')
 
 def setup_repo_local_settings():
     with cd(env.NEWSBLUR_PATH):
@@ -606,10 +608,14 @@ def setup_db_firewall():
     sudo('ufw delete allow from 23.22.0.0/16 to any port 27017') # MongoDB
     sudo('ufw delete allow from 23.22.0.0/16 to any port 6379 ') # Redis
     sudo('ufw delete allow from 23.22.0.0/16 to any port 11211 ') # Memcached
-    sudo('ufw allow from 23.20.0.0/16 to any port 5432 ') # PostgreSQL
-    sudo('ufw allow from 23.20.0.0/16 to any port 27017') # MongoDB
-    sudo('ufw allow from 23.20.0.0/16 to any port 6379 ') # Redis
-    sudo('ufw allow from 23.20.0.0/16 to any port 11211 ') # Memcached
+    sudo('ufw delete allow from 23.20.0.0/16 to any port 5432 ') # PostgreSQL
+    sudo('ufw delete allow from 23.20.0.0/16 to any port 27017') # MongoDB
+    sudo('ufw delete allow from 23.20.0.0/16 to any port 6379 ') # Redis
+    sudo('ufw delete allow from 23.20.0.0/16 to any port 11211 ') # Memcached
+    sudo('ufw allow from 54.242.38.48/20 to any port 5432 ') # PostgreSQL
+    sudo('ufw allow from 54.242.38.48/20 to any port 27017') # MongoDB
+    sudo('ufw allow from 54.242.38.48/20 to any port 6379 ') # Redis
+    sudo('ufw allow from 54.242.38.48/20 to any port 11211 ') # Memcached
     sudo('ufw --force enable')
 
 def setup_db_motd():
@@ -731,7 +737,40 @@ def copy_task_settings():
     put('config/settings/task_settings.py', '%s/local_settings.py' % env.NEWSBLUR_PATH)
     run('echo "\nSERVER_NAME = \\\\"`hostname`\\\\"" >> %s/local_settings.py' % env.NEWSBLUR_PATH)
 
+# ===============
+# = Setup - EC2 =
+# ===============
+def setup_ec2_task():
+    AMI_NAME = 'ami-3c994355' # Ubuntu 64-bit 12.04 LTS
+    # INSTANCE_TYPE = 'c1.medium'
+    INSTANCE_TYPE = 'm1.small'
+    conn = EC2Connection(django_settings.AWS_ACCESS_KEY_ID, django_settings.AWS_SECRET_ACCESS_KEY)
+    reservation = conn.run_instances(AMI_NAME, instance_type=INSTANCE_TYPE,
+                                     key_name='sclay',
+                                     security_groups=['db-mongo'])
+    instance = reservation.instances[0]
+    print "Booting reservation: %s/%s (size: %s)" % (reservation, instance, INSTANCE_TYPE)
+    while True:
+        if instance.state == 'pending':
+            print ".",
+            sys.stdout.flush()
+            instance.update()
+            time.sleep(1)
+        elif instance.state == 'running':
+            print "...booted: %s" % instance.public_dns_name
+            time.sleep(5)
+            break
+        else:
+            print "!!! Error: %s" % instance.state
+            return
+
+    host = instance.public_dns_name
+    env.host_string = host
+
+    setup_task()
+
+
 # ==============
 # = Tasks - DB =
 # ==============
diff --git a/utils/munin/newsblur_tasks_servers.py b/utils/munin/newsblur_tasks_servers.py
index d4c96b9bf..3a94b60ac 100755
--- a/utils/munin/newsblur_tasks_servers.py
+++ b/utils/munin/newsblur_tasks_servers.py
@@ -17,13 +17,13 @@ class NBMuninGraph(MuninGraph):
             'total.draw' : 'LINE1',
         }
         stats = self.stats
-        graph.update(dict((("%s.label" % s['_id'], s['_id']) for s in stats)))
-        graph.update(dict((("%s.draw" % s['_id'], "AREASTACK") for s in stats)))
-        graph['graph_order'] = ' '.join(sorted(s['_id'] for s in stats))
+        graph.update(dict((("%s.label" % s['_id'].replace('-', ''), s['_id']) for s in stats)))
+        graph.update(dict((("%s.draw" % s['_id'].replace('-', ''), "AREASTACK") for s in stats)))
+        graph['graph_order'] = ' '.join(sorted(s['_id'].replace('-', '') for s in stats))
        return graph
 
     def calculate_metrics(self):
-        servers = dict((("%s" % s['_id'], s['feeds']) for s in self.stats))
+        servers = dict((("%s" % s['_id'].replace('-', ''), s['feeds']) for s in self.stats))
         servers['total'] = self.total[0]['feeds']
         return servers