diff --git a/.gitignore b/.gitignore
index bdfd52106..a6a5f2ea7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -43,7 +43,8 @@ templates/maintenance_on.html
 vendor/mms-agent/settings.py
 apps/social/spam.py
 venv*
-/backups
+backup
+backups
 config/mongodb_keyfile.key
 
 # Docker Jinja templates
diff --git a/.vscode/settings.json b/.vscode/settings.json
index a36a363b5..67ccc6f96 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -16,12 +16,12 @@
         "media/ios": true,
         "**/*.map": true,
         "ansible/playbooks/*/*": true,
-        "archive/*": true,
+        // "archive/*": true,
         "logs/*": true,
-        "static/*": true,
+        // "static/*": true,
         "media/fonts": true,
         "static/*.css": true,
-        "static/*.js": true,
+        "static/js/*.*.js": true,
         "blog/.jekyll-cache": true,
         "blog/_site": true,
         "docker/volumes": true,
diff --git a/ansible/playbooks/deploy_app.yml b/ansible/playbooks/deploy_app.yml
index 9f7a48a1a..cfee277ac 100644
--- a/ansible/playbooks/deploy_app.yml
+++ b/ansible/playbooks/deploy_app.yml
@@ -70,7 +70,7 @@
     run_once: yes
     connection: local
     amazon.aws.aws_s3:
-      bucket: newsblur_backups
+      bucket: newsblur-backups
      object: /static_py3.tgz
      src: /srv/newsblur/static.tgz
      mode: put
@@ -94,7 +94,7 @@
    vars:
      ansible_python_interpreter: /usr/bin/python3
    amazon.aws.aws_s3:
-      bucket: newsblur_backups
+      bucket: newsblur-backups
      object: /static_py3.tgz
      dest: /srv/newsblur/static.tgz
      mode: get
diff --git a/ansible/roles/backups/tasks/main.yml b/ansible/roles/backups/tasks/main.yml
index 4a6e0f556..00bff2dfb 100644
--- a/ansible/roles/backups/tasks/main.yml
+++ b/ansible/roles/backups/tasks/main.yml
@@ -17,14 +17,14 @@
 - name: Set backup vars
   set_fact:
     redis_story_filename: backup_redis_story_2021-04-13-04-00.rdb.gz
-    postgres_filename: backup_postgresql_2022-01-06-19-46.sql.gz
+    postgres_filename: backup_postgresql_2022-02-03-04-00.sql.gz
     mongo_filename: backup_mongo_2021-03-15-04-00.tgz
     redis_filename: backup_redis_2021-03-15-04-00.rdb.gz
   tags: never, restore_postgres, restore_mongo, restore_redis, restore_redis_story
 
 - name: Download archives
   amazon.aws.aws_s3:
-    bucket: newsblur_backups
+    bucket: "newsblur-backups"
     object: "{{ item.dir }}{{ item.file }}"
     dest: "/srv/newsblur/backups/{{ item.file }}"
     mode: get
diff --git a/ansible/roles/consul/tasks/get_consul_manager_ip.py b/ansible/roles/consul/tasks/get_consul_manager_ip.py
index 96d21f975..0dfca056d 100755
--- a/ansible/roles/consul/tasks/get_consul_manager_ip.py
+++ b/ansible/roles/consul/tasks/get_consul_manager_ip.py
@@ -1,4 +1,4 @@
-#!/srv/newsblur/venv/newsblur3/bin/python
+#!/usr/bin/env python
 
 import os
 import digitalocean
diff --git a/ansible/roles/consul/tasks/main.yml b/ansible/roles/consul/tasks/main.yml
index 789de3600..121d6b2f0 100644
--- a/ansible/roles/consul/tasks/main.yml
+++ b/ansible/roles/consul/tasks/main.yml
@@ -13,8 +13,9 @@
 - name: Installing Consul
   become: yes
   apt:
-    pkg: consul
-    state: latest
+    allow_downgrades: yes
+    pkg: consul=1.10.4
+    state: present
 
 - name: Register Manager IP
   run_once: yes
diff --git a/ansible/roles/mongo/tasks/main.yml b/ansible/roles/mongo/tasks/main.yml
index a4273bc8f..0f154f1e0 100644
--- a/ansible/roles/mongo/tasks/main.yml
+++ b/ansible/roles/mongo/tasks/main.yml
@@ -58,10 +58,17 @@
 - name: Make backup directory
   become: yes
   file:
-    path: /opt/mongo/newsblur/backup/
+    path: "/mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}/backup/"
     state: directory
-    mode: 0666
+    mode: 0777
 
+- name: Create symlink to mounted volume for backups to live
+  file:
+    state: link
+    src: "/mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}/backup"
+    path: /srv/newsblur/backup
+    force: yes
+
 - name: Start db-mongo docker container
   become: yes
   docker_container:
@@ -86,7 +93,7 @@
       - /srv/newsblur/ansible/roles/mongo/templates/mongo.conf:/etc/mongod.conf
       - /srv/newsblur/config/mongodb_keyfile.key:/srv/newsblur/config/mongodb_keyfile.key
       - /var/log/mongodb/:/var/log/mongodb/
-      - /opt/mongo/newsblur/backup/:/backup/
+      - /mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}/backup/:/backup/
   when: (inventory_hostname | regex_replace('[0-9]+', '')) in ['db-mongo', 'db-mongo-primary', 'db-mongo-secondary']
 
 - name: Start db-mongo-analytics docker container
@@ -114,7 +121,7 @@
       - /srv/newsblur/ansible/roles/mongo/templates/mongo.analytics.conf:/etc/mongod.conf
       - /srv/newsblur/config/mongodb_keyfile.key:/srv/newsblur/config/mongodb_keyfile.key
       - /var/log/mongodb/:/var/log/mongodb/
-      - /opt/mongo/newsblur/backup/:/backup/
+      - /mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}/backup/:/backup/
   when: (inventory_hostname | regex_replace('[0-9]+', '')) == 'db-mongo-analytics'
 
 - name: Create mongo database user
@@ -185,12 +192,18 @@
       docker run --rm -it
       OUTPUT=$(eval sudo df / | head -n 2 | tail -1);
       -v /srv/newsblur:/srv/newsblur
-      --network=newsblurnet
+      --network=host
      --hostname {{ ansible_hostname }}
      newsblur/newsblur_python3 /srv/newsblur/utils/monitor_disk_usage.py $OUTPUT
   tags:
     - sanity-checker
 
+- name: Copy common secrets
+  copy:
+    src: /srv/secrets-newsblur/settings/common_settings.py
+    dest: /srv/newsblur/newsblur_web/local_settings.py
+  register: app_changed
+
 - name: Add mongo backup
   cron:
     name: mongo backup
@@ -201,15 +214,15 @@
   tags:
     - mongo-backup
 
-- name: Add mongo starred_stories+stories backup
-  cron:
-    name: mongo starred/shared/all stories backup
-    minute: "0"
-    hour: "5"
-    job: /srv/newsblur/docker/mongo/backup_mongo_stories.sh
-  when: '"db-mongo-secondary1" in inventory_hostname'
-  tags:
-    - mongo-backup
+# - name: Add mongo starred_stories+stories backup
+#   cron:
+#     name: mongo starred/shared/all stories backup
+#     minute: "0"
+#     hour: "5"
+#     job: /srv/newsblur/docker/mongo/backup_mongo.sh stories
+#   when: '"db-mongo-secondary1" in inventory_hostname'
+#   tags:
+#     - mongo-backup
 
 # Renaming a db-mongo3 to db-mongo2:
 # - Change hostname to db-mongo2 on Digital Ocean (doctl)
diff --git a/ansible/roles/mongo/templates/consul_service.json b/ansible/roles/mongo/templates/consul_service.json
index 192d901a0..8ca05190b 100644
--- a/ansible/roles/mongo/templates/consul_service.json
+++ b/ansible/roles/mongo/templates/consul_service.json
@@ -1,6 +1,6 @@
 {
   "service": {
-    "name": "db-mongo",
+    "name": "db-mongo-staging",
     "id": "{{ inventory_hostname }}",
     "tags": [
       "db"
diff --git a/ansible/roles/postgres/tasks/main.yml b/ansible/roles/postgres/tasks/main.yml
index ff3efb52e..54f282fe0 100644
--- a/ansible/roles/postgres/tasks/main.yml
+++ b/ansible/roles/postgres/tasks/main.yml
@@ -14,6 +14,13 @@
     state: directory
     mode: 0777
 
+- name: Ensure postgres backup directory
+  become: yes
+  file:
+    path: /srv/newsblur/backups
+    state: directory
+    mode: 0777
+
 - name: Start postgres docker containers
   become: yes
   docker_container:
@@ -63,6 +70,12 @@
   notify:
     - reload consul
 
+- name: Copy common secrets
+  copy:
+    src: /srv/secrets-newsblur/settings/common_settings.py
+    dest: /srv/newsblur/newsblur_web/local_settings.py
+  register: app_changed
+
 - name: Add sanity checkers cronjob for disk usage
   become: yes
   cron:
@@ -78,19 +91,19 @@
      --hostname {{ ansible_hostname }}
      newsblur/newsblur_python3
      /srv/newsblur/utils/monitor_disk_usage.py $OUTPUT
 
+- name: Add postgres backup log
+  become: yes
+  file:
+    path: /var/log/postgres_backup.log
+    state: touch
+    mode: 0777
+    owner: 1000
+    group: 1001
+
 - name: Add postgres backup
   cron:
     name: postgres backup
     minute: "0"
     hour: "4"
-    job: >-
-      NOW=$(eval date +%F-%H-%M);
-      BACKUP_FILE=backup_postgresql_${NOW}.sql;
-      sudo docker exec -it postgres
-      /usr/lib/postgresql/13/bin/pg_dump -U newsblur -h 127.0.0.1 -Fc newsblur > backup/$BACKUP_FILE;
-      sudo docker run --rm -it
-      -v /srv/newsblur:/srv/newsblur
-      -v /backup/:/backup/
-      --network=newsblurnet
-      newsblur/newsblur_python3
-      python /srv/newsblur/utils/backups/backup_psql.py
+    job: /srv/newsblur/docker/postgres/backup_postgres.sh 1> /var/log/postgres_backup.log 2>&1
+
diff --git a/apps/api/views.py b/apps/api/views.py
index b374c97ac..f9126c970 100644
--- a/apps/api/views.py
+++ b/apps/api/views.py
@@ -217,6 +217,10 @@ def check_share_on_site(request, token):
         logging.user(request.user, "~FBFinding feed (check_share_on_site): %s" % rss_url)
         feed = Feed.get_feed_from_url(rss_url, create=False, fetch=False)
+    if not feed:
+        rss_url = urllib.parse.urljoin(story_url, rss_url)
+        logging.user(request.user, "~FBFinding feed (check_share_on_site): %s" % rss_url)
+        feed = Feed.get_feed_from_url(rss_url, create=False, fetch=False)
     if not feed:
         logging.user(request.user, "~FBFinding feed (check_share_on_site): %s" % story_url)
         feed = Feed.get_feed_from_url(story_url, create=False, fetch=False)
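
The new fallback above covers pages that advertise their RSS feed with a relative URL: urllib.parse.urljoin resolves the candidate against the story's URL before the second lookup. A quick illustration of the resolution rules the branch relies on (example URLs are made up):

    from urllib.parse import urljoin

    # A root-relative href resolves against the site root:
    urljoin("https://example.com/blog/post/123", "/feeds/rss.xml")
    # -> 'https://example.com/feeds/rss.xml'

    # A bare relative href resolves against the story's directory:
    urljoin("https://example.com/blog/post/123", "feed/")
    # -> 'https://example.com/blog/post/feed/'
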
diff --git a/apps/reader/urls.py b/apps/reader/urls.py
index 9c109ab1c..29bfe6525 100644
--- a/apps/reader/urls.py
+++ b/apps/reader/urls.py
@@ -14,6 +14,7 @@ urlpatterns = [
     url(r'^page/(?P<feed_id>\d+)', views.load_feed_page, name='load-feed-page'),
     url(r'^refresh_feed/(?P<feed_id>\d+)', views.refresh_feed, name='refresh-feed'),
     url(r'^favicons', views.load_feed_favicons, name='load-feed-favicons'),
+    url(r'^river_stories_widget', views.load_river_stories_widget, name='load-river-stories-widget'),
     url(r'^river_stories', views.load_river_stories__redis, name='load-river-stories'),
     url(r'^complete_river', views.complete_river, name='complete-river'),
     url(r'^refresh_feeds', views.refresh_feeds, name='refresh-feeds'),
diff --git a/apps/reader/views.py b/apps/reader/views.py
index b34a48b9f..11c1c70e2 100644
--- a/apps/reader/views.py
+++ b/apps/reader/views.py
@@ -4,7 +4,13 @@ import redis
 import requests
 import random
 import zlib
+import concurrent.futures
 import re
+import ssl
+import socket
+import base64
+import urllib.parse
+import urllib.request
 from django.shortcuts import get_object_or_404
 from django.shortcuts import render
 from django.contrib.auth.decorators import login_required
@@ -1453,7 +1459,7 @@ def load_river_stories__redis(request):
         story_hashes = []
         unread_feed_story_hashes = []
 
-    mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order)
+    mstories = MStory.objects(story_hash__in=story_hashes[:limit]).order_by(story_date_order)
     stories = Feed.format_stories(mstories)
 
     found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
@@ -1594,6 +1600,58 @@ def load_river_stories__redis(request):
 
     return data
 
+
+@json.json_view
+def load_river_stories_widget(request):
+    logging.user(request, "Widget load")
+    river_stories_data = json.decode(load_river_stories__redis(request).content)
+    timeout = 3
+    start = time.time()
+
+    def load_url(url):
+        original_url = url
+        url = urllib.parse.urljoin(settings.NEWSBLUR_URL, url)
+        scontext = ssl.SSLContext(ssl.PROTOCOL_TLS)
+        scontext.verify_mode = ssl.VerifyMode.CERT_NONE
+        try:
+            conn = urllib.request.urlopen(url, context=scontext, timeout=timeout)
+        except urllib.request.URLError:
+            url = url.replace('localhost', 'haproxy')
+            try:
+                conn = urllib.request.urlopen(url, context=scontext, timeout=timeout)
+            except urllib.request.URLError as e:
+                logging.user(request.user, '"%s" not fetched in %ss: %s' % (url, (time.time() - start), e))
+                return None
+        except socket.timeout:
+            logging.user(request.user, '"%s" not fetched in %ss' % (url, (time.time() - start)))
+            return None
+        data = conn.read()
+        logging.user(request.user, '"%s" fetched in %ss' % (url, (time.time() - start)))
+        return dict(url=original_url, data=data)
+
+    # Find the image thumbnails and download in parallel
+    thumbnail_urls = []
+    for story in river_stories_data['stories']:
+        thumbnail_values = list(story['secure_image_thumbnails'].values())
+        if thumbnail_values:
+            thumbnail_urls.append(thumbnail_values[0])
+
+    with concurrent.futures.ThreadPoolExecutor(max_workers=6) as executor:
+        pages = executor.map(load_url, thumbnail_urls)
+
+    # Reassemble thumbnails back into stories
+    thumbnail_data = dict()
+    for page in pages:
+        if not page: continue
+        thumbnail_data[page['url']] = base64.b64encode(page['data']).decode('utf-8')
+    for story in river_stories_data['stories']:
+        thumbnail_values = list(story['secure_image_thumbnails'].values())
+        if thumbnail_values and thumbnail_values[0] in thumbnail_data:
+            story['select_thumbnail_data'] = thumbnail_data[thumbnail_values[0]]
+
+    logging.user(request, ("Elapsed Time: %ss" % (time.time() - start)))
+
+    return river_stories_data
 
 @json.json_view
 def complete_river(request):
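
load_river_stories_widget fans the thumbnail fetches out over a small thread pool and then re-keys the results by URL so each story can pick up its own image. A stripped-down sketch of that pattern (SSL context and haproxy fallback omitted; URLs hypothetical):

    import base64
    import concurrent.futures
    import urllib.request

    def fetch(url, timeout=3):
        # Stand-in for load_url above: bytes for one thumbnail, or None on error.
        try:
            with urllib.request.urlopen(url, timeout=timeout) as conn:
                return dict(url=url, data=conn.read())
        except Exception:
            return None

    urls = ["https://example.com/a.jpg", "https://example.com/b.jpg"]
    with concurrent.futures.ThreadPoolExecutor(max_workers=6) as executor:
        pages = executor.map(fetch, urls)  # yields results in input order

    thumbnails = {page["url"]: base64.b64encode(page["data"]).decode("utf-8")
                  for page in pages if page}

Base64-encoding the bytes lets the widget response embed the images directly in JSON rather than forcing the iOS/Android client to make a second round of requests.
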
diff --git a/config/munin/aws_elb_latency b/archive/munin/aws_elb_latency
similarity index 100%
rename from config/munin/aws_elb_latency
rename to archive/munin/aws_elb_latency
diff --git a/config/munin/aws_elb_requests b/archive/munin/aws_elb_requests
similarity index 100%
rename from config/munin/aws_elb_requests
rename to archive/munin/aws_elb_requests
diff --git a/config/munin/aws_sqs_queue_length_ b/archive/munin/aws_sqs_queue_length_
similarity index 100%
rename from config/munin/aws_sqs_queue_length_
rename to archive/munin/aws_sqs_queue_length_
diff --git a/config/munin/cassandra_cfcounts b/archive/munin/cassandra_cfcounts
similarity index 100%
rename from config/munin/cassandra_cfcounts
rename to archive/munin/cassandra_cfcounts
diff --git a/config/munin/cassandra_key_cache_ratio b/archive/munin/cassandra_key_cache_ratio
similarity index 100%
rename from config/munin/cassandra_key_cache_ratio
rename to archive/munin/cassandra_key_cache_ratio
diff --git a/config/munin/cassandra_latency b/archive/munin/cassandra_latency
similarity index 100%
rename from config/munin/cassandra_latency
rename to archive/munin/cassandra_latency
diff --git a/config/munin/cassandra_load b/archive/munin/cassandra_load
similarity index 100%
rename from config/munin/cassandra_load
rename to archive/munin/cassandra_load
diff --git a/config/munin/cassandra_pending b/archive/munin/cassandra_pending
similarity index 100%
rename from config/munin/cassandra_pending
rename to archive/munin/cassandra_pending
diff --git a/config/munin/ddwrt_wl_rate b/archive/munin/ddwrt_wl_rate
similarity index 100%
rename from config/munin/ddwrt_wl_rate
rename to archive/munin/ddwrt_wl_rate
diff --git a/config/munin/ddwrt_wl_signal b/archive/munin/ddwrt_wl_signal
similarity index 100%
rename from config/munin/ddwrt_wl_signal
rename to archive/munin/ddwrt_wl_signal
diff --git a/config/munin/gearman_connections b/archive/munin/gearman_connections
similarity index 100%
rename from config/munin/gearman_connections
rename to archive/munin/gearman_connections
diff --git a/config/munin/gearman_queues b/archive/munin/gearman_queues
similarity index 100%
rename from config/munin/gearman_queues
rename to archive/munin/gearman_queues
diff --git a/config/munin/haproxy b/archive/munin/haproxy
similarity index 100%
rename from config/munin/haproxy
rename to archive/munin/haproxy
diff --git a/config/munin/hookbox b/archive/munin/hookbox
similarity index 100%
rename from config/munin/hookbox
rename to archive/munin/hookbox
diff --git a/config/munin/loadavg b/archive/munin/loadavg
similarity index 100%
rename from config/munin/loadavg
rename to archive/munin/loadavg
diff --git a/config/munin/memcached_bytes b/archive/munin/memcached_bytes
similarity index 100%
rename from config/munin/memcached_bytes
rename to archive/munin/memcached_bytes
diff --git a/config/munin/memcached_connections b/archive/munin/memcached_connections
similarity index 100%
rename from config/munin/memcached_connections
rename to archive/munin/memcached_connections
diff --git a/config/munin/memcached_curr_items b/archive/munin/memcached_curr_items
similarity index 100%
rename from config/munin/memcached_curr_items
rename to archive/munin/memcached_curr_items
diff --git a/config/munin/memcached_items b/archive/munin/memcached_items
similarity index 100%
rename from config/munin/memcached_items
rename to archive/munin/memcached_items
diff --git a/config/munin/memcached_queries b/archive/munin/memcached_queries
similarity index 100%
rename from config/munin/memcached_queries
rename to archive/munin/memcached_queries
diff --git a/config/munin/mongo_btree b/archive/munin/mongo_btree
similarity index 100%
rename from config/munin/mongo_btree
rename to archive/munin/mongo_btree
diff --git a/config/munin/mongo_indexsize b/archive/munin/mongo_indexsize
similarity index 100%
rename from config/munin/mongo_indexsize
rename to archive/munin/mongo_indexsize
diff --git a/config/munin/mongo_mem b/archive/munin/mongo_mem
similarity index 100%
rename from config/munin/mongo_mem
rename to archive/munin/mongo_mem
diff --git a/config/munin/mongo_ops b/archive/munin/mongo_ops
similarity index 100%
rename from config/munin/mongo_ops
rename to archive/munin/mongo_ops
diff --git a/config/munin/mongodb_conn b/archive/munin/mongodb_conn
similarity index 100%
rename from config/munin/mongodb_conn
rename to archive/munin/mongodb_conn
diff --git a/config/munin/mongodb_heap_usage b/archive/munin/mongodb_heap_usage
similarity index 100%
rename from config/munin/mongodb_heap_usage
rename to archive/munin/mongodb_heap_usage
diff --git a/config/munin/mongodb_objects_newsblur b/archive/munin/mongodb_objects_newsblur
similarity index 100%
rename from config/munin/mongodb_objects_newsblur
rename to archive/munin/mongodb_objects_newsblur
diff --git a/config/munin/mongodb_ops b/archive/munin/mongodb_ops
similarity index 100%
rename from config/munin/mongodb_ops
rename to archive/munin/mongodb_ops
diff --git a/config/munin/mongodb_page_faults b/archive/munin/mongodb_page_faults
similarity index 100%
rename from config/munin/mongodb_page_faults
rename to archive/munin/mongodb_page_faults
diff --git a/config/munin/mongodb_queues b/archive/munin/mongodb_queues
similarity index 100%
rename from config/munin/mongodb_queues
rename to archive/munin/mongodb_queues
diff --git a/config/munin/mongodb_replset_lag b/archive/munin/mongodb_replset_lag
similarity index 100%
rename from
config/munin/mongodb_replset_lag rename to archive/munin/mongodb_replset_lag diff --git a/config/munin/mongodb_size_newsblur b/archive/munin/mongodb_size_newsblur similarity index 100% rename from config/munin/mongodb_size_newsblur rename to archive/munin/mongodb_size_newsblur diff --git a/vendor/munin/__init__.py b/archive/munin/munin/__init__.py similarity index 100% rename from vendor/munin/__init__.py rename to archive/munin/munin/__init__.py diff --git a/vendor/munin/cassandra.py b/archive/munin/munin/cassandra.py similarity index 100% rename from vendor/munin/cassandra.py rename to archive/munin/munin/cassandra.py diff --git a/vendor/munin/ddwrt.py b/archive/munin/munin/ddwrt.py similarity index 100% rename from vendor/munin/ddwrt.py rename to archive/munin/munin/ddwrt.py diff --git a/vendor/munin/gearman.py b/archive/munin/munin/gearman.py similarity index 100% rename from vendor/munin/gearman.py rename to archive/munin/munin/gearman.py diff --git a/vendor/munin/memcached.py b/archive/munin/munin/memcached.py similarity index 100% rename from vendor/munin/memcached.py rename to archive/munin/munin/memcached.py diff --git a/vendor/munin/mongodb.py b/archive/munin/munin/mongodb.py similarity index 100% rename from vendor/munin/mongodb.py rename to archive/munin/munin/mongodb.py diff --git a/vendor/munin/mysql.py b/archive/munin/munin/mysql.py similarity index 100% rename from vendor/munin/mysql.py rename to archive/munin/munin/mysql.py diff --git a/vendor/munin/nginx.py b/archive/munin/munin/nginx.py similarity index 100% rename from vendor/munin/nginx.py rename to archive/munin/munin/nginx.py diff --git a/vendor/munin/pgbouncer.py b/archive/munin/munin/pgbouncer.py similarity index 100% rename from vendor/munin/pgbouncer.py rename to archive/munin/munin/pgbouncer.py diff --git a/vendor/munin/postgres.py b/archive/munin/munin/postgres.py similarity index 100% rename from vendor/munin/postgres.py rename to archive/munin/munin/postgres.py diff --git a/vendor/munin/redis.py b/archive/munin/munin/redis.py similarity index 100% rename from vendor/munin/redis.py rename to archive/munin/munin/redis.py diff --git a/vendor/munin/riak.py b/archive/munin/munin/riak.py similarity index 100% rename from vendor/munin/riak.py rename to archive/munin/munin/riak.py diff --git a/config/munin/mysql_dbrows_ b/archive/munin/mysql_dbrows_ similarity index 100% rename from config/munin/mysql_dbrows_ rename to archive/munin/mysql_dbrows_ diff --git a/config/munin/mysql_dbsize_ b/archive/munin/mysql_dbsize_ similarity index 100% rename from config/munin/mysql_dbsize_ rename to archive/munin/mysql_dbsize_ diff --git a/config/munin/nginx_connections b/archive/munin/nginx_connections similarity index 100% rename from config/munin/nginx_connections rename to archive/munin/nginx_connections diff --git a/config/munin/nginx_requests b/archive/munin/nginx_requests similarity index 100% rename from config/munin/nginx_requests rename to archive/munin/nginx_requests diff --git a/config/munin/path_size b/archive/munin/path_size similarity index 100% rename from config/munin/path_size rename to archive/munin/path_size diff --git a/config/munin/pg_newsblur_connections b/archive/munin/pg_newsblur_connections similarity index 100% rename from config/munin/pg_newsblur_connections rename to archive/munin/pg_newsblur_connections diff --git a/config/munin/pg_newsblur_db_size b/archive/munin/pg_newsblur_db_size similarity index 100% rename from config/munin/pg_newsblur_db_size rename to archive/munin/pg_newsblur_db_size diff 
--git a/config/munin/pg_newsblur_locks b/archive/munin/pg_newsblur_locks similarity index 100% rename from config/munin/pg_newsblur_locks rename to archive/munin/pg_newsblur_locks diff --git a/config/munin/pg_newsblur_stat_bgwriter b/archive/munin/pg_newsblur_stat_bgwriter similarity index 100% rename from config/munin/pg_newsblur_stat_bgwriter rename to archive/munin/pg_newsblur_stat_bgwriter diff --git a/config/munin/pg_newsblur_stat_database b/archive/munin/pg_newsblur_stat_database similarity index 100% rename from config/munin/pg_newsblur_stat_database rename to archive/munin/pg_newsblur_stat_database diff --git a/config/munin/pg_newsblur_stat_tables b/archive/munin/pg_newsblur_stat_tables similarity index 100% rename from config/munin/pg_newsblur_stat_tables rename to archive/munin/pg_newsblur_stat_tables diff --git a/config/munin/pg_newsblur_statio_tables b/archive/munin/pg_newsblur_statio_tables similarity index 100% rename from config/munin/pg_newsblur_statio_tables rename to archive/munin/pg_newsblur_statio_tables diff --git a/config/munin/pgbouncer_pools_cl_ b/archive/munin/pgbouncer_pools_cl_ similarity index 100% rename from config/munin/pgbouncer_pools_cl_ rename to archive/munin/pgbouncer_pools_cl_ diff --git a/config/munin/pgbouncer_pools_sv_ b/archive/munin/pgbouncer_pools_sv_ similarity index 100% rename from config/munin/pgbouncer_pools_sv_ rename to archive/munin/pgbouncer_pools_sv_ diff --git a/config/munin/pgbouncer_stats_avg_bytes_ b/archive/munin/pgbouncer_stats_avg_bytes_ similarity index 100% rename from config/munin/pgbouncer_stats_avg_bytes_ rename to archive/munin/pgbouncer_stats_avg_bytes_ diff --git a/config/munin/pgbouncer_stats_avg_query_ b/archive/munin/pgbouncer_stats_avg_query_ similarity index 100% rename from config/munin/pgbouncer_stats_avg_query_ rename to archive/munin/pgbouncer_stats_avg_query_ diff --git a/config/munin/pgbouncer_stats_avg_req_ b/archive/munin/pgbouncer_stats_avg_req_ similarity index 100% rename from config/munin/pgbouncer_stats_avg_req_ rename to archive/munin/pgbouncer_stats_avg_req_ diff --git a/config/munin/postgres_block_read_ b/archive/munin/postgres_block_read_ similarity index 100% rename from config/munin/postgres_block_read_ rename to archive/munin/postgres_block_read_ diff --git a/config/munin/postgres_commits_ b/archive/munin/postgres_commits_ similarity index 100% rename from config/munin/postgres_commits_ rename to archive/munin/postgres_commits_ diff --git a/config/munin/postgres_connections b/archive/munin/postgres_connections similarity index 100% rename from config/munin/postgres_connections rename to archive/munin/postgres_connections diff --git a/config/munin/postgres_locks b/archive/munin/postgres_locks similarity index 100% rename from config/munin/postgres_locks rename to archive/munin/postgres_locks diff --git a/config/munin/postgres_queries_ b/archive/munin/postgres_queries_ similarity index 100% rename from config/munin/postgres_queries_ rename to archive/munin/postgres_queries_ diff --git a/config/munin/postgres_space_ b/archive/munin/postgres_space_ similarity index 100% rename from config/munin/postgres_space_ rename to archive/munin/postgres_space_ diff --git a/config/munin/postgres_table_sizes b/archive/munin/postgres_table_sizes similarity index 100% rename from config/munin/postgres_table_sizes rename to archive/munin/postgres_table_sizes diff --git a/config/munin/redis_active_connections b/archive/munin/redis_active_connections similarity index 100% rename from 
config/munin/redis_active_connections rename to archive/munin/redis_active_connections diff --git a/config/munin/redis_commands b/archive/munin/redis_commands similarity index 100% rename from config/munin/redis_commands rename to archive/munin/redis_commands diff --git a/config/munin/redis_connects b/archive/munin/redis_connects similarity index 100% rename from config/munin/redis_connects rename to archive/munin/redis_connects diff --git a/config/munin/redis_sessions_active_connections b/archive/munin/redis_sessions_active_connections similarity index 100% rename from config/munin/redis_sessions_active_connections rename to archive/munin/redis_sessions_active_connections diff --git a/config/munin/redis_sessions_connects b/archive/munin/redis_sessions_connects similarity index 100% rename from config/munin/redis_sessions_connects rename to archive/munin/redis_sessions_connects diff --git a/config/munin/redis_sessions_size b/archive/munin/redis_sessions_size similarity index 100% rename from config/munin/redis_sessions_size rename to archive/munin/redis_sessions_size diff --git a/config/munin/redis_sessions_used_memory b/archive/munin/redis_sessions_used_memory similarity index 100% rename from config/munin/redis_sessions_used_memory rename to archive/munin/redis_sessions_used_memory diff --git a/config/munin/redis_sesssions_commands b/archive/munin/redis_sesssions_commands similarity index 100% rename from config/munin/redis_sesssions_commands rename to archive/munin/redis_sesssions_commands diff --git a/config/munin/redis_size b/archive/munin/redis_size similarity index 100% rename from config/munin/redis_size rename to archive/munin/redis_size diff --git a/config/munin/redis_story_active_connections b/archive/munin/redis_story_active_connections similarity index 100% rename from config/munin/redis_story_active_connections rename to archive/munin/redis_story_active_connections diff --git a/config/munin/redis_story_commands b/archive/munin/redis_story_commands similarity index 100% rename from config/munin/redis_story_commands rename to archive/munin/redis_story_commands diff --git a/config/munin/redis_story_connects b/archive/munin/redis_story_connects similarity index 100% rename from config/munin/redis_story_connects rename to archive/munin/redis_story_connects diff --git a/config/munin/redis_story_size b/archive/munin/redis_story_size similarity index 100% rename from config/munin/redis_story_size rename to archive/munin/redis_story_size diff --git a/config/munin/redis_story_used_memory b/archive/munin/redis_story_used_memory similarity index 100% rename from config/munin/redis_story_used_memory rename to archive/munin/redis_story_used_memory diff --git a/config/munin/redis_used_memory b/archive/munin/redis_used_memory similarity index 100% rename from config/munin/redis_used_memory rename to archive/munin/redis_used_memory diff --git a/config/munin/request_time b/archive/munin/request_time similarity index 100% rename from config/munin/request_time rename to archive/munin/request_time diff --git a/config/munin/riak_ops b/archive/munin/riak_ops similarity index 100% rename from config/munin/riak_ops rename to archive/munin/riak_ops diff --git a/config/munin/tc_size b/archive/munin/tc_size similarity index 100% rename from config/munin/tc_size rename to archive/munin/tc_size diff --git a/docker/mongo/backup_mongo.sh b/docker/mongo/backup_mongo.sh index be67a8c7e..c3b97c66f 100755 --- a/docker/mongo/backup_mongo.sh +++ b/docker/mongo/backup_mongo.sh @@ -1,34 +1,66 @@ #!/usr/bin/env bash 
collections=(
-    classifier_tag
-    classifier_author
-    classifier_feed
-    classifier_title
-    # shared_stories
+    activities
     category
     category_site
-    sent_emails
-    social_profile
-    social_subscription
-    social_services
-    statistics
-    user_search
+    classifier_author
+    classifier_feed
+    classifier_tag
+    classifier_title
+    custom_styling
+    dashboard_river
+    # feed_icons
+    # feed_pages
     feedback
+    # fetch_exception_history
+    # fetch_history
+    follow_request
+    gift_codes
+    inline
+    interactions
+    m_dashboard_river
+    notification_tokens
+    notifications
+    popularity_query
+    redeemed_codes
+    saved_searches
+    sent_emails
+    # shared_stories
+    social_invites
+    social_profile
+    social_services
+    social_subscription
+    # starred_stories
+    starred_stories_counts
+    statistics
+    # stories
+    system.profile
+    system.users
+    # uploaded_opml
+    user_search
 )
 
+if [ "$1" = "stories" ]; then
+    collections+=(
+        shared_stories
+        starred_stories
+    )
+fi
+
+now=$(date '+%Y-%m-%d-%H-%M')
+
 for collection in ${collections[@]}; do
-    now=$(date '+%Y-%m-%d-%H-%M')
     echo "---> Dumping $collection - ${now}"
-    docker exec -it mongo mongodump -d newsblur -c $collection -o /backup/backup_mongo
+    docker exec -it mongo mongodump -d newsblur -c $collection -o /backup
 done;
 
-echo " ---> Compressing backup_mongo.tgz"
-tar -zcf /opt/mongo/newsblur/backup/backup_mongo.tgz /opt/mongo/newsblur/backup/backup_mongo
+echo " ---> Compressing /srv/newsblur/backup/newsblur into /srv/newsblur/backup/backup_mongo_${now}.tgz"
+tar -zcf /srv/newsblur/backup/backup_mongo_${now}.tgz -C / srv/newsblur/backup/newsblur
 
 echo " ---> Uploading backups to S3"
-docker run --rm -v /srv/newsblur:/srv/newsblur -v /opt/mongo/newsblur/backup/:/opt/mongo/newsblur/backup/ --network=newsblurnet newsblur/newsblur_python3:latest python /srv/newsblur/utils/backups/backup_mongo.py
+docker run --user 1000:1001 --rm -v /srv/newsblur:/srv/newsblur -v /srv/newsblur/backup/:/srv/newsblur/backup/ --network=host newsblur/newsblur_python3:latest python /srv/newsblur/utils/backups/backup_mongo.py
 
 # Don't delete backup since the backup_mongo.py script will rm them
 ## rm /opt/mongo/newsblur/backup/backup_mongo_${now}.tgz
diff --git a/docker/postgres/backup_postgres.sh b/docker/postgres/backup_postgres.sh
new file mode 100755
index 000000000..97d5c8b16
--- /dev/null
+++ b/docker/postgres/backup_postgres.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+now=$(date '+%Y-%m-%d-%H-%M')
+
+echo "---> PG dumping - ${now}"
+BACKUP_FILE="/srv/newsblur/backup/backup_postgresql_${now}.sql"
+sudo docker exec -it postgres /usr/lib/postgresql/13/bin/pg_dump -U newsblur -h 127.0.0.1 -Fc newsblur > $BACKUP_FILE
+
+echo " ---> Uploading postgres backup to S3"
+sudo docker run --user 1000:1001 --rm \
+    -v /srv/newsblur:/srv/newsblur \
+    --network=host \
+    newsblur/newsblur_python3 \
+    python /srv/newsblur/utils/backups/backup_psql.py
+
+# Don't delete backup since the backup_mongo.py script will rm them
+## rm /opt/mongo/newsblur/backup/backup_mongo_${now}.tgz
+## rm /opt/mongo/newsblur/backup/backup_mongo_${now}
+echo " ---> Finished uploading backups to S3: "
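
Both scripts delegate the actual S3 traffic to the Python helpers under utils/backups/. A hedged spot-check that a night's run actually landed in the bucket — the bucket and key prefix follow the names used in this diff, and credentials are assumed to come from the environment:

    import boto3

    s3 = boto3.client("s3")
    resp = s3.list_objects_v2(Bucket="newsblur-backups", Prefix="backup_db_mongo/")
    for obj in resp.get("Contents", []):
        print(obj["Key"], obj["Size"], obj["LastModified"])
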
return Response(f"Operation failure: {e}", 500) data = dict(objects=stats['objects']) formatted_data = {} for k, v in data.items(): @@ -67,10 +70,13 @@ def repl_set_lag(): return primary_optime - oldest_secondary_optime - # no such item for Cursor instance - oplog_length = _get_oplog_length() - # not running with --replSet - replication_lag = _get_max_replication_lag() + try: + # no such item for Cursor instance + oplog_length = _get_oplog_length() + # not running with --replSet + replication_lag = _get_max_replication_lag() + except pymongo.errors.OperationFailure as e: + return Response(f"Operation failure: {e}", 500) formatted_data = {} for k, v in oplog_length.items(): @@ -89,7 +95,10 @@ def repl_set_lag(): @app.route("/size/") def size(): - stats = connection.newsblur.command("dbstats") + try: + stats = connection.newsblur.command("dbstats") + except pymongo.errors.OperationFailure as e: + return Response(f"Operation failure: {e}", 500) data = dict(size=stats['fsUsedSize']) formatted_data = {} for k, v in data.items(): @@ -106,7 +115,10 @@ def size(): @app.route("/ops/") def ops(): - status = connection.admin.command('serverStatus') + try: + status = connection.admin.command('serverStatus') + except pymongo.errors.OperationFailure as e: + return Response(f"Operation failure: {e}", 500) data = dict( (q, status["opcounters"][q]) for q in status['opcounters'].keys() @@ -127,7 +139,10 @@ def ops(): @app.route("/page-faults/") def page_faults(): - status = connection.admin.command('serverStatus') + try: + status = connection.admin.command('serverStatus') + except pymongo.errors.OperationFailure as e: + return Response(f"Operation failure: {e}", 500) try: value = status['extra_info']['page_faults'] except KeyError: @@ -148,7 +163,10 @@ def page_faults(): @app.route("/page-queues/") def page_queues(): - status = connection.admin.command('serverStatus') + try: + status = connection.admin.command('serverStatus') + except pymongo.errors.OperationFailure as e: + return Response(f"Operation failure: {e}", 500) data = dict( (q, status["globalLock"]["currentQueue"][q]) for q in ("readers", "writers") diff --git a/flask_monitor/db_monitor.py b/flask_monitor/db_monitor.py index a2ba07c23..d6fd9979e 100644 --- a/flask_monitor/db_monitor.py +++ b/flask_monitor/db_monitor.py @@ -1,4 +1,4 @@ -from flask import Flask, abort, request +from flask import Flask, abort, request, Response import os import psycopg2 import pymysql @@ -39,7 +39,7 @@ def db_check_postgres(): conn = psycopg2.connect(connect_params) except: print(" ---> Postgres can't connect to the database: %s" % connect_params) - abort(503) + abort(Response("Can't connect to db", 503)) cur = conn.cursor() cur.execute("""SELECT id FROM feeds ORDER BY feeds.id DESC LIMIT 1""") @@ -47,7 +47,7 @@ def db_check_postgres(): for row in rows: return str(row[0]) - abort(504) + abort(Response("No rows found", 504)) @app.route("/db_check/mysql") def db_check_mysql(): @@ -70,7 +70,7 @@ def db_check_mysql(): db=settings.DATABASES['default']['NAME']) except: print(" ---> Mysql can't connect to the database: %s" % connect_params) - abort(503) + abort(Response("Can't connect to mysql db", 503)) cur = conn.cursor() cur.execute("""SELECT id FROM feeds ORDER BY feeds.id DESC LIMIT 1""") @@ -78,7 +78,7 @@ def db_check_mysql(): for row in rows: return str(row[0]) - abort(504) + abort(Response("No rows found", 504)) @app.route("/db_check/mongo") def db_check_mongo(): @@ -90,21 +90,21 @@ def db_check_mongo(): client = 
pymongo.MongoClient(f"mongodb://{settings.MONGO_DB['username']}:{settings.MONGO_DB['password']}@{settings.SERVER_NAME}.node.nyc1.consul/?authSource=admin") db = client.newsblur except: - abort(503) + abort(Response("Can't connect to db", 503)) try: stories = db.stories.estimated_document_count() except pymongo.errors.NotMasterError: - abort(504) + abort(Response("Not Master", 504)) except pymongo.errors.ServerSelectionTimeoutError: - abort(505) + abort(Response("Server selection timeout", 503)) except pymongo.errors.OperationFailure as e: if 'Authentication failed' in str(e): - abort(506) - abort(507) + abort(Response("Auth failed", 506)) + abort(Response("Operation Failure", 507)) if not stories: - abort(510) + abort(Response("No stories", 510)) status = client.admin.command('replSetGetStatus') members = status['members'] @@ -120,16 +120,15 @@ def db_check_mongo(): oldest_secondary_optime = optime['ts'].time if not primary_optime or not oldest_secondary_optime: - abort(511) + abort(Response("No optime", 511)) # if primary_optime - oldest_secondary_optime > 100: - # abort(512) + # abort(Response("Data is too old", 512)) return str(stories) @app.route("/db_check/mongo_analytics") def db_check_mongo_analytics(): - return str(1) if request.args.get('consul') == '1': return str(1) @@ -137,19 +136,19 @@ def db_check_mongo_analytics(): client = pymongo.MongoClient(f"mongodb://{settings.MONGO_ANALYTICS_DB['username']}:{settings.MONGO_ANALYTICS_DB['password']}@{settings.SERVER_NAME}/?authSource=admin") db = client.nbanalytics except: - abort(503) + abort(Response("Can't connect to db", 503)) try: fetches = db.feed_fetches.estimated_document_count() except (pymongo.errors.NotMasterError, pymongo.errors.ServerSelectionTimeoutError): - abort(504) + abort(Response("Not Master / Server selection timeout", 504)) except pymongo.errors.OperationFailure as e: if 'Authentication failed' in str(e): - abort(505) - abort(506) + abort(Response("Auth failed", 505)) + abort(Response("Operation failure", 506)) if not fetches: - abort(510) + abort(Response("No fetches in data", 510)) return str(fetches) @@ -161,17 +160,17 @@ def db_check_redis_user(): try: r = redis.Redis('db-redis-user.service.nyc1.consul', db=0) except: - abort(503) + abort(Response("Can't connect to db", 503)) try: randkey = r.randomkey() except: - abort(504) + abort(Response("Couldn't process randomkey", 504)) if randkey: return str(randkey) else: - abort(505) + abort(Response("Can't find a randomkey", 505)) @app.route("/db_check/redis_story") def db_check_redis_story(): @@ -181,17 +180,17 @@ def db_check_redis_story(): try: r = redis.Redis('db-redis-story.service.nyc1.consul', db=1) except: - abort(503) + abort(Response("Can't connect to db", 503)) try: randkey = r.randomkey() except: - abort(504) + abort(Response("Couldn't process randomkey", 504)) if randkey: return str(randkey) else: - abort(505) + abort(Response("Can't find a randomkey", 505)) @app.route("/db_check/redis_sessions") def db_check_redis_sessions(): @@ -201,17 +200,17 @@ def db_check_redis_sessions(): try: r = redis.Redis('db-redis-sessions.service.nyc1.consul', db=5) except: - abort(503) + abort(Response("Can't connect to db", 503)) try: randkey = r.randomkey() except: - abort(504) + abort(Response("Couldn't process randomkey", 504)) if randkey: return str(randkey) else: - abort(505) + abort(Response("Can't find a randomkey", 505)) @app.route("/db_check/redis_pubsub") def db_check_redis_pubsub(): @@ -221,24 +220,24 @@ def db_check_redis_pubsub(): try: r = 
redis.Redis('db-redis-pubsub.service.nyc1.consul', db=1) except: - abort(503) + abort(Response("Can't connect to db", 503)) try: pubsub_numpat = r.pubsub_numpat() except: - abort(504) + abort(Response("Couldn't process pubsub_numpat", 504)) if pubsub_numpat or isinstance(pubsub_numpat, int): return str(pubsub_numpat) else: - abort(505) + abort(Response("Can't find a pubsub_numpat", 505)) @app.route("/db_check/elasticsearch") def db_check_elasticsearch(): try: conn = elasticsearch.Elasticsearch("elasticsearch") except: - abort(503) + abort(Response("Can't connect to db", 503)) if conn.indices.exists('feeds-index'): return str("Index exists, but didn't try search") @@ -247,9 +246,9 @@ def db_check_elasticsearch(): # for result in results: # return unicode(result) # else: - # abort(404) + # abort(Response("Couldn't find any search results", 504)) else: - abort(504) + abort(Response("Couldn't find feeds-index", 504)) if __name__ == "__main__": print(" ---> Starting NewsBlur DB monitor flask server...") diff --git a/media/css/vendor/jquery.tagit.css b/media/css/vendor/jquery.tagit.css new file mode 100755 index 000000000..375dee557 --- /dev/null +++ b/media/css/vendor/jquery.tagit.css @@ -0,0 +1,110 @@ +ul.tagit { + border-style: solid; + border-width: 1px; + border-color: #C6C6C6; + background: inherit; + margin-left: inherit; /* usually we don't want the regular ul margins. */ + margin-right: inherit; + padding: 5px 5px 0; + overflow: hidden; +} +ul.tagit li.tagit-choice { + display: block; + margin: 2px 5px 2px 0; + + cursor: pointer; + position: relative; + + float: left; + font-weight: normal; + font-size: 9px; + border-radius: 4px; + line-height: 14px; + + padding: 1px 16px 2px 5px; + margin: 0 4px 4px 0; + background: none; + background-color: rgba(0, 0, 0, .1); + color: #959B8B; +/* text-shadow: 0 1px 0 rgba(255, 255, 255, .5);*/ + border: 1px solid transparent; + border-color: rgba(255, 255, 255, .3) transparent rgba(0, 0, 0, .1); + +} + +ul.tagit li.tagit-new { + padding: 0; + margin: 0; + list-style: none; +} + +ul.tagit li.tagit-choice a.tagit-label { + text-decoration: none; +} +ul.tagit li.tagit-choice .tagit-close { + cursor: pointer; + position: absolute; + right: .1em; + top: 50%; + margin-top: -8px; + line-height: 17px; +} + +/* used for some custom themes that don't need image icons */ +ul.tagit li.tagit-choice .tagit-close .text-icon { + display: none; +} + +ul.tagit li.tagit-choice a.tagit-close { + text-decoration: none; +} +ul.tagit li.tagit-choice .tagit-close { + right: .4em; +} +ul.tagit li.tagit-choice .ui-icon { + display: none; +} +ul.tagit li.tagit-choice .tagit-close .text-icon { + display: inline; + font-family: arial, sans-serif; + font-size: 16px; + line-height: 16px; + color: #777; +} +ul.tagit li.tagit-choice:hover, ul.tagit li.tagit-choice.remove { + background-color: #EDADAF; + border-color: #D6565B; + color: white; +} +ul.tagit li.tagit-choice:active { + background-color: #E6888D; + border-color: #CA404A; + color: white; +} + +ul.tagit li:hover a.tagit-close .text-icon { + color: #722; +} +ul.tagit input[type="text"] { + color: #333333; + -moz-box-sizing: border-box; + -webkit-box-sizing: border-box; + box-sizing: border-box; + font-size: 11px; + + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; + + border: none; + padding: 0 1px 5px 1px; + width: inherit; + background-color: inherit; + outline: none; +} + +ul.tagit li.tagit-choice input { + display: block; + float: left; + margin: 2px 5px 2px 0; +} diff --git 
a/media/js/vendor/jquery.noConflict.js b/media/js/vendor/jquery.noConflict.js index d7674da11..7a5330d96 100644 --- a/media/js/vendor/jquery.noConflict.js +++ b/media/js/vendor/jquery.noConflict.js @@ -1 +1 @@ -var $ = jQuery = jQuery.noConflict(true); +window.NB_$ = jQuery = jQuery.noConflict(true); diff --git a/newsblur_web/docker_local_settings.py b/newsblur_web/docker_local_settings.py index 44d182a98..8b252c4f7 100644 --- a/newsblur_web/docker_local_settings.py +++ b/newsblur_web/docker_local_settings.py @@ -38,6 +38,8 @@ DEBUG_QUERIES_SUMMARY_ONLY = True MEDIA_URL = '/media/' IMAGES_URL = '/imageproxy' +# Uncomment below to debug iOS/Android widget +# IMAGES_URL = 'https://haproxy/imageproxy' SECRET_KEY = 'YOUR SECRET KEY' AUTO_PREMIUM_NEW_USERS = True AUTO_ENABLE_NEW_USERS = True @@ -68,9 +70,10 @@ OAUTH_SECRET = 'SECRET_KEY_FROM_GOOGLE' S3_ACCESS_KEY = 'XXX' S3_SECRET = 'SECRET' -S3_BACKUP_BUCKET = 'newsblur_backups' +S3_BACKUP_BUCKET = 'newsblur-backups' S3_PAGES_BUCKET_NAME = 'pages-XXX.newsblur.com' S3_ICONS_BUCKET_NAME = 'icons-XXX.newsblur.com' +S3_AVATARS_BUCKET_NAME = 'avatars-XXX.newsblur.com' STRIPE_SECRET = "YOUR-SECRET-API-KEY" STRIPE_PUBLISHABLE = "YOUR-PUBLISHABLE-API-KEY" @@ -166,13 +169,6 @@ if len(logging._handlerList) < 1: datefmt='%b %d %H:%M:%S', handler=logging.StreamHandler) -S3_ACCESS_KEY = '000000000000000000000' -S3_SECRET = '000000000000000000000000/0000000000000000' -S3_BACKUP_BUCKET = 'newsblur_backups' -S3_PAGES_BUCKET_NAME = 'pages-dev.newsblur.com' -S3_ICONS_BUCKET_NAME = 'icons-dev.newsblur.com' -S3_AVATARS_BUCKET_NAME = 'avatars-dev.newsblur.com' - MAILGUN_ACCESS_KEY = 'key-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' MAILGUN_SERVER_NAME = 'newsblur.com' diff --git a/newsblur_web/local_settings.py.template b/newsblur_web/local_settings.py.template deleted file mode 100644 index 09477a8de..000000000 --- a/newsblur_web/local_settings.py.template +++ /dev/null @@ -1,172 +0,0 @@ -import logging -import pymongo - -# =================== -# = Server Settings = -# =================== - -ADMINS = ( - ('Samuel Clay', 'samuel@newsblur.com'), -) - -SERVER_EMAIL = 'server@newsblur.com' -HELLO_EMAIL = 'hello@newsblur.com' -NEWSBLUR_URL = 'http://www.newsblur.com' -SESSION_COOKIE_DOMAIN = '.localhost' - -# =================== -# = Global Settings = -# =================== - -DEBUG = True -DEBUG_ASSETS = DEBUG -MEDIA_URL = '/media/' -SECRET_KEY = 'YOUR SECRET KEY' -AUTO_PREMIUM_NEW_USERS = True -AUTO_ENABLE_NEW_USERS = True -ENFORCE_SIGNUP_CAPTCHA = False - -# CACHE_BACKEND = 'dummy:///' -# CACHE_BACKEND = 'locmem:///' -# CACHE_BACKEND = 'memcached://127.0.0.1:11211' - -CACHES = { - 'default': { - 'BACKEND': 'redis_cache.RedisCache', - 'LOCATION': '127.0.0.1:6379', - 'OPTIONS': { - 'DB': 6, - 'PARSER_CLASS': 'redis.connection.HiredisParser' - }, - }, -} - -EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' - -# Set this to the username that is shown on the homepage to unauthenticated users. 
-HOMEPAGE_USERNAME = 'popular' - -# Google Reader OAuth API Keys -OAUTH_KEY = 'www.example.com' -OAUTH_SECRET = 'SECRET_KEY_FROM_GOOGLE' - -S3_ACCESS_KEY = 'XXX' -S3_SECRET = 'SECRET' -S3_BACKUP_BUCKET = 'newsblur_backups' -S3_PAGES_BUCKET_NAME = 'pages-XXX.newsblur.com' -S3_ICONS_BUCKET_NAME = 'icons-XXX.newsblur.com' - -STRIPE_SECRET = "YOUR-SECRET-API-KEY" -STRIPE_PUBLISHABLE = "YOUR-PUBLISHABLE-API-KEY" - -# =============== -# = Social APIs = -# =============== - -FACEBOOK_APP_ID = '111111111111111' -FACEBOOK_SECRET = '99999999999999999999999999999999' -TWITTER_CONSUMER_KEY = 'ooooooooooooooooooooo' -TWITTER_CONSUMER_SECRET = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' -YOUTUBE_API_KEY = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" - -# ============= -# = Databases = -# ============= - -DATABASES = { - 'default': { - 'NAME': 'newsblur', - 'ENGINE': 'django.db.backends.mysql', - 'USER': 'newsblur', - 'PASSWORD': '', - 'HOST': '127.0.0.1', - 'TEST': { - 'NAME': 'newsblur_test', - 'COLLATION': 'utf8_general_ci' - } - }, -} - -MONGO_DB = { - 'name': 'newsblur', - 'host': '127.0.0.1', - 'port': 27017 -} -MONGO_ANALYTICS_DB = { - 'name': 'nbanalytics', - 'host': '128.0.0.1', - 'port': 27017, -} - -MONGODB_SLAVE = { - 'host': '127.0.0.1' -} - -# Celery RabbitMQ/Redis Broker -BROKER_URL = "redis://127.0.0.1:6379/0" -CELERY_RESULT_BACKEND = BROKER_URL - -REDIS = { - 'host': '127.0.0.1', -} -REDIS_PUBSUB = { - 'host': '127.0.0.1', -} -REDIS_STORY = { - 'host': '127.0.0.1', -} -REDIS_SESSIONS = { - 'host': '127.0.0.1', - 'port': 6379 -} - -ELASTICSEARCH_FEED_HOSTS = ["127.0.0.1:9200"] -ELASTICSEARCH_STORY_HOSTS = ["127.0.0.1:9200"] - -ELASTICSEARCH_FEED_HOST = "http://127.0.0.1:9200" -ELASTICSEARCH_STORY_HOST = "http://127.0.0.1:9200" - -BACKED_BY_AWS = { - 'pages_on_node': False, - 'pages_on_s3': False, - 'icons_on_s3': False, -} - - -# =========== -# = Logging = -# =========== - -# Logging (setup for development) -LOG_TO_STREAM = True - -if len(logging._handlerList) < 1: - LOG_FILE = '~/newsblur/logs/development.log' - logging.basicConfig(level=logging.DEBUG, - format='%(asctime)-12s: %(message)s', - datefmt='%b %d %H:%M:%S', - handler=logging.StreamHandler) - -S3_ACCESS_KEY = '000000000000000000000' -S3_SECRET = '000000000000000000000000/0000000000000000' -S3_BACKUP_BUCKET = 'newsblur_backups' -S3_PAGES_BUCKET_NAME = 'pages-dev.newsblur.com' -S3_ICONS_BUCKET_NAME = 'icons-dev.newsblur.com' -S3_AVATARS_BUCKET_NAME = 'avatars-dev.newsblur.com' - -MAILGUN_ACCESS_KEY = 'key-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' -MAILGUN_SERVER_NAME = 'newsblur.com' - -DO_TOKEN_LOG = '0000000000000000000000000000000000000000000000000000000000000000' -DO_TOKEN_FABRIC = '0000000000000000000000000000000000000000000000000000000000000000' - -SERVER_NAME = "nblocalhost" -NEWSBLUR_URL = 'https://localhost' - -SESSION_ENGINE = 'redis_sessions.session' - -# CORS_ORIGIN_REGEX_WHITELIST = ('^(https?://)?(\w+\.)?nb.local\.com$', ) - -YOUTUBE_API_KEY = "000000000000000000000000000000000000000" -RECAPTCHA_SECRET_KEY = "0000000000000000000000000000000000000000" -IMAGES_SECRET_KEY = "0000000000000000000000000000000" diff --git a/newsblur_web/settings.py b/newsblur_web/settings.py index 9fefa8de2..1550afd72 100644 --- a/newsblur_web/settings.py +++ b/newsblur_web/settings.py @@ -573,7 +573,7 @@ BACKED_BY_AWS = { } PROXY_S3_PAGES = True -S3_BACKUP_BUCKET = 'newsblur_backups' +S3_BACKUP_BUCKET = 'newsblur-backups' S3_PAGES_BUCKET_NAME = 'pages.newsblur.com' S3_ICONS_BUCKET_NAME = 'icons.newsblur.com' S3_AVATARS_BUCKET_NAME = 
'avatars.newsblur.com'
diff --git a/templates/api/share_bookmarklet.js b/templates/api/share_bookmarklet.js
index 9e2d4bc69..8db966de1 100644
--- a/templates/api/share_bookmarklet.js
+++ b/templates/api/share_bookmarklet.js
@@ -3,9 +3,11 @@
 (function() {
     window.NEWSBLUR = window.NEWSBLUR || {};
     var exports = undefined;
-
-    {% include_javascripts "bookmarklet" %}
-
+
+    {% include_javascripts_raw "bookmarklet" %}
+
+    var $ = window.NB_$;
+    console.log('jquery', $, window.NB_$);
 
     NEWSBLUR.Bookmarklet = function(options) {
         var defaults = {};
@@ -473,6 +475,7 @@
                 $.make('img', { src: 'data:image/png;charset=utf-8;base64,' + this.images['accept_image'] }),
                 'Saved'
             ]));
+            this.pre_share_check_story();
             setTimeout(function() {
                 // $.modal.close();
             }, 2000);
@@ -634,7 +637,7 @@
         },
 
         attach_css: function() {
-            var css = '{% include_stylesheets "bookmarklet" %}';
+            var css = "{% include_stylesheets_raw "bookmarklet" %}";
             var style = '<style id="newsblur_bookmarklet_css">' + css + '</style>';
             if ($('#newsblur_bookmarklet_css').length) {
                 $('#newsblur_bookmarklet_css').replaceWith(style);
"" : count.index+1}" + name = "db-mongo-analytics${count.index+2}" region = var.droplet_region size = var.mongo_analytics_droplet_size volume_ids = [element(digitalocean_volume.mongo_analytics_volume.*.id, count.index)] diff --git a/utils/backups/backup_mongo.py b/utils/backups/backup_mongo.py index 3d63144e8..f795d516a 100755 --- a/utils/backups/backup_mongo.py +++ b/utils/backups/backup_mongo.py @@ -1,10 +1,12 @@ #!/usr/bin/python3 from datetime import datetime, timedelta import os +import sys import re import logging import mimetypes import boto3 +import threading import shutil from boto3.s3.transfer import S3Transfer from newsblur_web import settings @@ -13,25 +15,21 @@ logger = logging.getLogger(__name__) def main(): - BACKUP_DIR = '/opt/mongo/newsblur/backup/' + BACKUP_DIR = '/srv/newsblur/backup/' filenames = [f for f in os.listdir(BACKUP_DIR) if '.tgz' in f] for filename in filenames: file_path = os.path.join(BACKUP_DIR, filename) basename = os.path.basename(file_path) - key_base, key_ext = list(splitext(basename)) - key_prefix = "".join(['mongo/', key_base]) - key_datestamp = datetime.utcnow().strftime("_%Y-%m-%d-%H-%M") - key = "".join([key_prefix, key_datestamp, key_ext]) - print("Uploading {0} to {1}".format(file_path, key)) - upload(file_path, settings.S3_BACKUP_BUCKET, key) - print('Rotating file on S3 with key prefix {0} and extension {1}'.format(key_prefix, key_ext)) - rotate(key_prefix, key_ext, settings.S3_BACKUP_BUCKET) + key_prefix = 'backup_db_mongo/' + print("Uploading {0} to {1} on {2}".format(file_path, key_prefix, settings.S3_BACKUP_BUCKET)) + sys.stdout.flush() + upload_rotate(file_path, settings.S3_BACKUP_BUCKET, key_prefix) # shutil.rmtree(filename[:-4]) - # os.remove(filename) + os.remove(file_path) -def upload_rotate(file_path, s3_bucket, s3_key_prefix, aws_key=None, aws_secret=None): +def upload_rotate(file_path, s3_bucket, s3_key_prefix): ''' Upload file_path to s3 bucket with prefix Ex. 
diff --git a/utils/backups/backup_mongo.py b/utils/backups/backup_mongo.py
index 3d63144e8..f795d516a 100755
--- a/utils/backups/backup_mongo.py
+++ b/utils/backups/backup_mongo.py
@@ -1,10 +1,12 @@
 #!/usr/bin/python3
 from datetime import datetime, timedelta
 import os
+import sys
 import re
 import logging
 import mimetypes
 import boto3
+import threading
 import shutil
 from boto3.s3.transfer import S3Transfer
 from newsblur_web import settings
@@ -13,25 +15,21 @@ logger = logging.getLogger(__name__)
 
 def main():
-    BACKUP_DIR = '/opt/mongo/newsblur/backup/'
+    BACKUP_DIR = '/srv/newsblur/backup/'
     filenames = [f for f in os.listdir(BACKUP_DIR) if '.tgz' in f]
     for filename in filenames:
         file_path = os.path.join(BACKUP_DIR, filename)
         basename = os.path.basename(file_path)
-        key_base, key_ext = list(splitext(basename))
-        key_prefix = "".join(['mongo/', key_base])
-        key_datestamp = datetime.utcnow().strftime("_%Y-%m-%d-%H-%M")
-        key = "".join([key_prefix, key_datestamp, key_ext])
-        print("Uploading {0} to {1}".format(file_path, key))
-        upload(file_path, settings.S3_BACKUP_BUCKET, key)
-        print('Rotating file on S3 with key prefix {0} and extension {1}'.format(key_prefix, key_ext))
-        rotate(key_prefix, key_ext, settings.S3_BACKUP_BUCKET)
+        key_prefix = 'backup_db_mongo/'
+        print("Uploading {0} to {1} on {2}".format(file_path, key_prefix, settings.S3_BACKUP_BUCKET))
+        sys.stdout.flush()
+        upload_rotate(file_path, settings.S3_BACKUP_BUCKET, key_prefix)
         # shutil.rmtree(filename[:-4])
-        # os.remove(filename)
+        os.remove(file_path)
 
-def upload_rotate(file_path, s3_bucket, s3_key_prefix, aws_key=None, aws_secret=None):
+def upload_rotate(file_path, s3_bucket, s3_key_prefix):
     ''' Upload file_path to s3 bucket with prefix
     Ex. upload_rotate('/tmp/file-2015-01-01.tar.bz2', 'backups', 'foo.net/')
@@ -41,26 +39,26 @@ def upload_rotate(file_path, s3_bucket, s3_key_prefix, aws_key=None, aws_secret=
     Ex file-2015-12-28.tar.bz2
     '''
     key = ''.join([s3_key_prefix, os.path.basename(file_path)])
-    logger.debug("Uploading {0} to {1}".format(file_path, key))
-    upload(file_path, s3_bucket, key, aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
+    print("Uploading {0} to {1}".format(file_path, key))
+    upload(file_path, s3_bucket, key)
 
     file_root, file_ext = splitext(os.path.basename(file_path))
     # strip timestamp from file_base
-    regex = '(?P<filename>.*)-(?P<year>[\d]+?)-(?P<month>[\d]+?)-(?P<day>[\d]+?)'
+    regex = '(?P<filename>.*)_(?P<year>[\d]+?)-(?P<month>[\d]+?)-(?P<day>[\d]+?)-(?P<hour>[\d]+?)-(?P<minute>[\d]+?)'
     match = re.match(regex, file_root)
     if not match:
         raise Exception('File does not contain a timestamp')
     key_prefix = ''.join([s3_key_prefix, match.group('filename')])
-    logger.debug('Rotating files on S3 with key prefix {0} and extension {1}'.format(key_prefix, file_ext))
-    rotate(key_prefix, file_ext, s3_bucket, aws_key=aws_key, aws_secret=aws_secret)
+    print('Rotating files on S3 with key prefix {0} and extension {1}'.format(key_prefix, file_ext))
+    rotate(key_prefix, file_ext, s3_bucket)
 
-def rotate(key_prefix, key_ext, bucket_name, daily_backups=7, weekly_backups=4, aws_key=None, aws_secret=None):
+def rotate(key_prefix, key_ext, bucket_name, daily_backups=7, weekly_backups=4):
     """ Delete old files we've uploaded to S3 according to grandfather, father, son strategy """
     session = boto3.Session(
-        aws_access_key_id=aws_key,
-        aws_secret_access_key=aws_secret
+        aws_access_key_id=settings.S3_ACCESS_KEY,
+        aws_secret_access_key=settings.S3_SECRET
     )
     s3 = session.resource('s3')
     bucket = s3.Bucket(bucket_name)
@@ -108,20 +106,30 @@ def splitext( filename ):
         return filename[:index], filename[index:]
     return os.path.splitext(filename)
 
-def upload(source_path, bucketname, keyname, acl='private', guess_mimetype=True, aws_access_key_id=None, aws_secret_access_key=None):
+def upload(source_path, bucketname, keyname, acl='private', guess_mimetype=True):
 
-    client = boto3.client('s3', 'us-west-2', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
-    transfer = S3Transfer(client)
-    # Upload /tmp/myfile to s3://bucket/key
-    extra_args = {
-        'ACL': acl,
-    }
-    if guess_mimetype:
-        mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
-        extra_args['ContentType'] = mtype
+    client = boto3.client('s3', aws_access_key_id=settings.S3_ACCESS_KEY, aws_secret_access_key=settings.S3_SECRET)
+    client.upload_file(source_path, bucketname, keyname, Callback=ProgressPercentage(source_path))
 
-    transfer.upload_file(source_path, bucketname, keyname, extra_args=extra_args)
+class ProgressPercentage(object):
+
+    def __init__(self, filename):
+        self._filename = filename
+        self._size = float(os.path.getsize(filename))
+        self._seen_so_far = 0
+        self._lock = threading.Lock()
+
+    def __call__(self, bytes_amount):
+        # To simplify, assume this is hooked up to a single filename
+        with self._lock:
+            self._seen_so_far += bytes_amount
+            percentage = (self._seen_so_far / self._size) * 100
+            sys.stdout.write(
+                "\r%s  %s / %s  (%.2f%%)" % (
+                    self._filename, self._seen_so_far, self._size,
+                    percentage))
+            sys.stdout.flush()
 
 if __name__ == "__main__":
     main()
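
rotate() implements the usual grandfather-father-son scheme: every daily backup inside the 7-day window survives, one backup per week survives for 4 weeks, and everything older is deleted. A minimal sketch of that pruning decision, assuming the timestamps have already been parsed out of the S3 keys (the real function extracts them with the regex above):

    from datetime import datetime, timedelta

    def keys_to_delete(keys_with_dates, daily_backups=7, weekly_backups=4):
        # keys_with_dates: (s3_key, datetime) pairs.
        now = datetime.utcnow()
        daily_horizon = now - timedelta(days=daily_backups)
        weekly_horizon = now - timedelta(weeks=weekly_backups)
        doomed = []
        for key, backed_up_at in keys_with_dates:
            if backed_up_at >= daily_horizon:
                continue  # recent daily backup: keep
            if backed_up_at >= weekly_horizon and backed_up_at.weekday() == 6:
                continue  # one weekly survivor (Sundays here, arbitrarily)
            doomed.append(key)
        return doomed

Which weekday survives is arbitrary; the point is that exactly one backup per week outlives the daily window.
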
diff --git a/utils/backups/backup_psql.py b/utils/backups/backup_psql.py
index f00ecca02..86ecab271 100644
--- a/utils/backups/backup_psql.py
+++ b/utils/backups/backup_psql.py
@@ -32,11 +32,14 @@
 import time
 import boto3
 from django.conf import settings
 
+BACKUP_DIR = '/srv/newsblur/backup/'
+
 s3 = boto3.client('s3', aws_access_key_id=settings.S3_ACCESS_KEY, aws_secret_access_key=settings.S3_SECRET)
 hostname = socket.gethostname().replace('-','_')
-s3_object_name = f'backup_{hostname}/backup_{hostname}_{time.strftime("%Y-%m-%d-%H-%M")}.rdb.gz'
-path = os.listdir('/backup')[0]
-print('Uploading %s (from %s) to S3...' % (s3_object_name, path))
-s3.upload_file(path, settings.S3_BACKUP_BUCKET, s3_object_name, Callback=ProgressPercentage(path))
-
+s3_object_name = f'backup_{hostname}/backup_{hostname}_{time.strftime("%Y-%m-%d-%H-%M")}.sql'
+path = os.listdir(BACKUP_DIR)[0]
+full_path = os.path.join(BACKUP_DIR, path)
+print('Uploading %s to %s on S3 bucket %s' % (full_path, s3_object_name, settings.S3_BACKUP_BUCKET))
+s3.upload_file(full_path, settings.S3_BACKUP_BUCKET, s3_object_name, Callback=ProgressPercentage(full_path))
+os.remove(full_path)
diff --git a/utils/templatetags/utils_tags.py b/utils/templatetags/utils_tags.py
index 08664e0dd..613a08ff6 100644
--- a/utils/templatetags/utils_tags.py
+++ b/utils/templatetags/utils_tags.py
@@ -1,3 +1,5 @@
+import os
+import re
 import struct
 import datetime
 from django.contrib.sites.models import Site
@@ -10,6 +12,7 @@ from vendor.timezones.utilities import localtime_for_timezone
 from utils.user_functions import get_user
 from django.utils.safestring import mark_safe
 from pipeline.templatetags.pipeline import stylesheet, javascript
+from pipeline.templatetags.pipeline import JavascriptNode, StylesheetNode
 
 register = template.Library()
 
@@ -258,7 +261,42 @@ def include_javascripts(parser, token):
     return javascript(parser, token)
     # asset_type = 'javascripts'
     # return mark_safe(settings.JAMMIT.render_tags(asset_type, asset_package))
-
+
+class RawJSNode(JavascriptNode):
+    def render(self, context):
+        output = super(RawJSNode, self).render(context)
+        path = re.search(r"src=\"/(.*?)\"", output)
+        assert path
+        filename = path.group(1)
+        abs_filename = os.path.join(settings.NEWSBLUR_DIR, filename)
+        with open(abs_filename, 'r') as f:
+            output = f.read()
+        return output
+
+@register.tag
+def include_javascripts_raw(parser, token):
+    """Prints out the JS code found in the static asset packages."""
+    tag_name, name = token.split_contents()
+    scripts = RawJSNode(name)
+    return scripts
+
+class RawStylesheetNode(StylesheetNode):
+    def render(self, context):
+        output = super(RawStylesheetNode, self).render(context)
+        path = re.search(r"href=\"/(.*?)\"", output)
+        assert path
+        filename = path.group(1)
+        abs_filename = os.path.join(settings.NEWSBLUR_DIR, filename)
+        with open(abs_filename, 'r') as f:
+            output = f.read().replace('"', '\\"').replace('\n', '')
+        return output
+
+@register.tag
+def include_stylesheets_raw(parser, token):
+    """Prints out the CSS code found in the static asset packages."""
+    tag_name, name = token.split_contents()
+    scripts = RawStylesheetNode(name)
+    return scripts
 
 @register.tag
 def include_stylesheets(parser, token):