Mirror of https://github.com/viq/NewsBlur.git (synced 2025-09-18 21:43:31 +00:00)

Merge branch 'master' into feed_settings

* master: (23 commits)
  - Adding mark_story_as_unread to API. Now works flawlessly up to settings.DAYS_OF_UNREAD (currently 2 weeks).
  - Rate limit starts at 0.
  - Fixing cache add using the correct API.
  - Fixing cache for ratelimiting.
  - Rate limiting /reader/feeds and /reader/refresh_feeds.
  - Move folder to folder now complete. Ready to launch!
  - Moving feed to folder, end-to-end. Just needs move folder to folder.
  - Adding Google+ and Pinboard to sharing tools.
  - Toning down updates on infrequently updated feeds.
  - Adding update_counts to /feeds API endpoint, to force a recalculation of feed unread counts (WARNING: slower than calling /feeds then /refresh_feeds).
  - Finishing up realtime for testing on prod.
  - Adding fabfile config for redis.
  - Adding 'E' as a keyboard shortcut for the Everything view.
  - Doubling the amount the Space key pages in a story.
  - Auto-linkify urls in stories.
  - Preparing realtime for deployment.
  - Adding socket.io communication and feed updating on client-side. Ready to ship?
  - Adding server half of redis real-time unread count updates. That was the easy part.
  - Adding new API endpoint: mark_feed_stories_as_read. Refactored mark_story_as_read.
  - Fixing issue for single story preference users: when in page view and a story is not found, clicking on other stories in the story titles doesn't update the Feed view. I can't imagine how few users this actually impacts, but it's now fixed.
  - ...

commit 083465134d
29 changed files with 5041 additions and 129 deletions
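Before the diff itself, a note on the headline API change: the new mark_feed_stories_as_read endpoint accepts a mapping of feed ids to story ids. A minimal client-side sketch in Python follows; the requests session, the base URL, and the JSON encoding of the feeds_stories form field are assumptions for illustration, not something this commit specifies.

import json
import requests

def mark_feed_stories_as_read(session, base_url, feeds_stories):
    # feeds_stories maps feed_id -> list of story guids, matching the
    # 'feeds_stories' parameter read by the new view further below.
    # Encoding the mapping as JSON in a form field is an assumption.
    response = session.post(
        base_url + "/reader/mark_feed_stories_as_read",
        data={"feeds_stories": json.dumps(feeds_stories)},
    )
    return response.json()  # the view returns {"code": 1} on success

# Hypothetical usage with an already-authenticated requests.Session():
# session = requests.Session()
# mark_feed_stories_as_read(session, "https://www.newsblur.com",
#                           {"42": ["story-guid-1", "story-guid-2"]})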
@@ -6,6 +6,7 @@ from django.db import models, IntegrityError
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from mongoengine.queryset import OperationError
from apps.reader.managers import UserSubscriptionManager
from apps.rss_feeds.models import Feed, MStory, DuplicateFeed
from apps.analyzer.models import MClassifierFeed, MClassifierAuthor, MClassifierTag, MClassifierTitle
@@ -140,6 +141,51 @@ class UserSubscription(models.Model):
            MUserStory.delete_marked_as_read_stories(self.user.pk, self.feed.pk)

        self.save()

    def mark_story_ids_as_read(self, story_ids, request=None):
        data = dict(code=0, payload=story_ids)

        if not request:
            request = self.user

        if not self.needs_unread_recalc:
            self.needs_unread_recalc = True
            self.save()

        if len(story_ids) > 1:
            logging.user(request, "~FYRead %s stories in feed: %s" % (len(story_ids), self.feed))
        else:
            logging.user(request, "~FYRead story in feed: %s" % (self.feed))

        for story_id in story_ids:
            try:
                story = MStory.objects.get(story_feed_id=self.feed.pk, story_guid=story_id)
            except MStory.DoesNotExist:
                # Story has been deleted, probably by feed_fetcher.
                continue
            except MStory.MultipleObjectsReturned:
                continue
            now = datetime.datetime.utcnow()
            date = now if now > story.story_date else story.story_date # For handling future stories
            m = MUserStory(story=story, user_id=self.user.pk, feed_id=self.feed.pk, read_date=date, story_id=story_id)
            try:
                m.save()
            except OperationError, e:
                original_m = MUserStory.objects.get(story=story, user_id=self.user.pk, feed_id=self.feed.pk)
                logging.user(request, "~BRMarked story as read error: %s" % (e))
                logging.user(request, "~BRMarked story as read: %s" % (story_id))
                logging.user(request, "~BROrigin story as read: %s" % (m.story.story_guid))
                logging.user(request, "~BRMarked story id: %s" % (original_m.story_id))
                logging.user(request, "~BROrigin story guid: %s" % (original_m.story.story_guid))
                logging.user(request, "~BRRead now date: %s, original read: %s, story_date: %s." % (m.read_date, original_m.read_date, story.story_date))
                original_m.story_id = story_id
                original_m.read_date = date
                original_m.save()
            except OperationError, e:
                logging.user(request, "~BRCan't even save: %s" % (original_m.story_id))
                pass

        return data

    def calculate_feed_scores(self, silent=False, stories_db=None):
        # now = datetime.datetime.strptime("2009-07-06 22:30:03", "%Y-%m-%d %H:%M:%S")
@@ -309,7 +355,7 @@ class UserSubscriptionFolders(models.Model):
        self.folders = json.encode(user_sub_folders)
        self.save()

    def delete_feed(self, feed_id, in_folder):
    def delete_feed(self, feed_id, in_folder, commit_delete=True):
        def _find_feed_in_folders(old_folders, folder_name='', multiples_found=False, deleted=False):
            new_folders = []
            for k, folder in enumerate(old_folders):
@@ -338,7 +384,7 @@ class UserSubscriptionFolders(models.Model):
        self.folders = json.encode(user_sub_folders)
        self.save()

        if not multiples_found and deleted:
        if not multiples_found and deleted and commit_delete:
            try:
                user_sub = UserSubscription.objects.get(user=self.user, feed=feed_id)
            except Feed.DoesNotExist:
@@ -353,8 +399,8 @@ class UserSubscriptionFolders(models.Model):
            user_sub.delete()
            MUserStory.objects(user_id=self.user.pk, feed_id=feed_id).delete()

    def delete_folder(self, folder_to_delete, in_folder, feed_ids_in_folder):
        def _find_folder_in_folders(old_folders, folder_name, feeds_to_delete):
    def delete_folder(self, folder_to_delete, in_folder, feed_ids_in_folder, commit_delete=True):
        def _find_folder_in_folders(old_folders, folder_name, feeds_to_delete, deleted_folder=None):
            new_folders = []
            for k, folder in enumerate(old_folders):
                if isinstance(folder, int):
@@ -365,18 +411,22 @@
                    for f_k, f_v in folder.items():
                        if f_k == folder_to_delete and folder_name == in_folder:
                            logging.user(self.user, "~FBDeleting folder '~SB%s~SN' in '%s': %s" % (f_k, folder_name, folder))
                            deleted_folder = folder
                        else:
                            nf, feeds_to_delete = _find_folder_in_folders(f_v, f_k, feeds_to_delete)
                            nf, feeds_to_delete, deleted_folder = _find_folder_in_folders(f_v, f_k, feeds_to_delete, deleted_folder)
                            new_folders.append({f_k: nf})

            return new_folders, feeds_to_delete
            return new_folders, feeds_to_delete, deleted_folder

        user_sub_folders = json.decode(self.folders)
        user_sub_folders, feeds_to_delete = _find_folder_in_folders(user_sub_folders, '', feed_ids_in_folder)
        user_sub_folders, feeds_to_delete, deleted_folder = _find_folder_in_folders(user_sub_folders, '', feed_ids_in_folder)
        self.folders = json.encode(user_sub_folders)
        self.save()

        UserSubscription.objects.filter(user=self.user, feed__in=feeds_to_delete).delete()
        if commit_delete:
            UserSubscription.objects.filter(user=self.user, feed__in=feeds_to_delete).delete()

        return deleted_folder

    def rename_folder(self, folder_to_rename, new_folder_name, in_folder):
        def _find_folder_in_folders(old_folders, folder_name):
@@ -399,6 +449,27 @@
        user_sub_folders = _find_folder_in_folders(user_sub_folders, '')
        self.folders = json.encode(user_sub_folders)
        self.save()

    def move_feed_to_folder(self, feed_id, in_folder=None, to_folder=None):
        user_sub_folders = json.decode(self.folders)
        self.delete_feed(feed_id, in_folder, commit_delete=False)
        user_sub_folders = json.decode(self.folders)
        user_sub_folders = add_object_to_folder(int(feed_id), to_folder, user_sub_folders)
        self.folders = json.encode(user_sub_folders)
        self.save()

        return self

    def move_folder_to_folder(self, folder_name, in_folder=None, to_folder=None):
        user_sub_folders = json.decode(self.folders)
        deleted_folder = self.delete_folder(folder_name, in_folder, [], commit_delete=False)
        user_sub_folders = json.decode(self.folders)
        user_sub_folders = add_object_to_folder(deleted_folder, to_folder, user_sub_folders)
        self.folders = json.encode(user_sub_folders)
        self.save()

        return self


class Feature(models.Model):
    """
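Both move methods above rely on an add_object_to_folder helper that is not part of this diff. Purely for orientation, here is a hypothetical stand-in that matches how it is used here (folders are nested lists of feed ids and {name: [children]} dicts, with '' meaning the top level); the real helper in the codebase may differ.

def add_object_to_folder(obj, to_folder, folders):
    # Hypothetical reimplementation for illustration only. NewsBlur folders
    # are nested lists: feed ids are ints, subfolders are {name: [children]}.
    if not to_folder:
        folders.append(obj)  # '' means the top level
        return folders
    for item in folders:
        if isinstance(item, dict):
            for name, children in item.items():
                if name == to_folder:
                    children.append(obj)
                else:
                    add_object_to_folder(obj, to_folder, children)
    return folders

# Example: moving feed 42 into "Tech" turns [1, 2, {"Tech": [3]}]
# into [1, 2, {"Tech": [3, 42]}].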
@@ -18,6 +18,7 @@ urlpatterns = patterns('',
    url(r'^starred_stories', views.load_starred_stories, name='load-starred-stories'),
    url(r'^mark_all_as_read', views.mark_all_as_read, name='mark-all-as-read'),
    url(r'^mark_story_as_read', views.mark_story_as_read, name='mark-story-as-read'),
    url(r'^mark_feed_stories_as_read', views.mark_feed_stories_as_read, name='mark-feed-stories-as-read'),
    url(r'^mark_story_as_unread', views.mark_story_as_unread),
    url(r'^mark_story_as_starred', views.mark_story_as_starred),
    url(r'^mark_story_as_unstarred', views.mark_story_as_unstarred),
@@ -26,6 +27,8 @@ urlpatterns = patterns('',
    url(r'^delete_folder', views.delete_folder, name='delete-folder'),
    url(r'^rename_feed', views.rename_feed, name='rename-feed'),
    url(r'^rename_folder', views.rename_folder, name='rename-folder'),
    url(r'^move_feed_to_folder', views.move_feed_to_folder, name='move-feed-to-folder'),
    url(r'^move_folder_to_folder', views.move_folder_to_folder, name='move-folder-to-folder'),
    url(r'^add_url', views.add_url),
    url(r'^add_folder', views.add_folder),
    url(r'^add_feature', views.add_feature, name='add-feature'),
@@ -17,7 +17,6 @@ from django.core.validators import email_re
from django.core.mail import EmailMultiAlternatives
from collections import defaultdict
from operator import itemgetter
from mongoengine.queryset import OperationError
from apps.recommendations.models import RecommendedFeed
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds, apply_classifier_authors, apply_classifier_tags
@@ -40,6 +39,7 @@ from utils.story_functions import bunch
from utils.story_functions import story_score
from utils import log as logging
from utils.view_functions import get_argument_or_404
from utils.ratelimit import ratelimit
from vendor.timezones.utilities import localtime_for_timezone

SINGLE_DAY = 60*60*24
@@ -153,6 +153,7 @@ def autologin(request, username, secret):

    return HttpResponseRedirect(reverse('index') + next)

@ratelimit(minutes=1, requests=10)
@json.json_view
def load_feeds(request):
    user = get_user(request)
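The @ratelimit(minutes=1, requests=10) decorator lives in utils.ratelimit and is not shown in this diff. The commit notes about the rate limit starting at 0 and about using the correct cache-add API suggest a counter kept in Django's cache; a simplified, illustrative sketch of that idea (not NewsBlur's actual implementation) could look like this:

from functools import wraps
from django.core.cache import cache
from django.http import HttpResponse

def ratelimit(minutes=1, requests=10):
    # Illustrative only: count hits per user or IP in a cache key that
    # expires after the window. cache.add() only sets the key when it is
    # absent, which initializes the counter at 0 without clobbering it.
    def decorator(view_func):
        @wraps(view_func)
        def wrapped(request, *args, **kwargs):
            ident = request.user.pk if request.user.is_authenticated() else request.META.get('REMOTE_ADDR')
            key = 'ratelimit:%s:%s' % (view_func.__name__, ident)
            cache.add(key, 0, 60 * minutes)
            count = cache.incr(key)
            if count > requests:
                return HttpResponse("Rate limited", status=403)
            return view_func(request, *args, **kwargs)
        return wrapped
    return decorator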
@@ -160,6 +161,7 @@ def load_feeds(request):
    not_yet_fetched = False
    include_favicons = request.REQUEST.get('include_favicons', False)
    flat = request.REQUEST.get('flat', False)
    update_counts = request.REQUEST.get('update_counts', False)

    if flat: return load_feeds_flat(request)

@@ -176,6 +178,8 @@ def load_feeds(request):

    for sub in user_subs:
        pk = sub.feed.pk
        if update_counts:
            sub.calculate_feed_scores(silent=True)
        feeds[pk] = sub.canonical(include_favicon=include_favicons)
        if feeds[pk].get('not_yet_fetched'):
            not_yet_fetched = True
@@ -261,6 +265,7 @@ def load_feeds_flat(request):
    data = dict(flat_folders=flat_folders, feeds=feeds, user=user.username)
    return data

@ratelimit(minutes=1, requests=10)
@json.json_view
def refresh_feeds(request):
    start = datetime.datetime.utcnow()
@@ -659,59 +664,41 @@ def mark_story_as_read(request):
            return dict(code=-1)
    else:
        return dict(code=-1)

    if not usersub.needs_unread_recalc:
        usersub.needs_unread_recalc = True
        usersub.save()

    data = dict(code=0, payload=story_ids)

    if len(story_ids) > 1:
        logging.user(request, "~FYRead %s stories in feed: %s" % (len(story_ids), usersub.feed))
    else:
        logging.user(request, "~FYRead story in feed: %s" % (usersub.feed))

    for story_id in story_ids:
        try:
            story = MStory.objects.get(story_feed_id=feed_id, story_guid=story_id)
        except MStory.DoesNotExist:
            # Story has been deleted, probably by feed_fetcher.
            continue
        except MStory.MultipleObjectsReturned:
            continue
        now = datetime.datetime.utcnow()
        date = now if now > story.story_date else story.story_date # For handling future stories
        m = MUserStory(story=story, user_id=request.user.pk, feed_id=feed_id, read_date=date, story_id=story_id)
        try:
            m.save()
        except OperationError, e:
            original_m = MUserStory.objects.get(story=story, user_id=request.user.pk, feed_id=feed_id)
            logging.user(request, "~BRMarked story as read error: %s" % (e))
            logging.user(request, "~BRMarked story as read: %s / %s" % (story_id, m.story.story_guid))
            logging.user(request, "~BROriginal story id: %s / %s" % (original_m.story_id, original_m.story.story_guid))
            logging.user(request, "~BRRead now date: %s, original read: %s, story_date: %s." % (m.read_date, original_m.read_date, story.story_date))
            original_m.story_id = story_id
            original_m.read_date = date
            original_m.save()
    data = usersub.mark_story_ids_as_read(story_ids, request=request)

    return data

@ajax_login_required
@json.json_view
def mark_feed_stories_as_read(request):
    feeds_stories = request.REQUEST.get('feeds_stories', {})

    for feed_id, story_ids in feeds_stories.items():
        try:
            usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
        except (UserSubscription.DoesNotExist, Feed.DoesNotExist):
            duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
            if duplicate_feed:
                try:
                    usersub = UserSubscription.objects.get(user=request.user,
                                                           feed=duplicate_feed[0].feed)
                except (UserSubscription.DoesNotExist, Feed.DoesNotExist):
                    continue
            else:
                continue

        usersub.mark_story_ids_as_read(story_ids)

    return dict(code=1)

@ajax_login_required
@json.json_view
def mark_story_as_unread(request):
    story_id = request.POST['story_id']
    feed_id = int(request.POST['feed_id'])

    try:
        usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
    except Feed.DoesNotExist:
        duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
        if duplicate_feed:
            try:
                usersub = UserSubscription.objects.get(user=request.user,
                                                       feed=duplicate_feed[0].feed)
            except Feed.DoesNotExist:
                return dict(code=-1)
    usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)

    if not usersub.needs_unread_recalc:
        usersub.needs_unread_recalc = True
@@ -719,9 +706,25 @@ def mark_story_as_unread(request):

    data = dict(code=0, payload=dict(story_id=story_id))
    logging.user(request, "~FY~SBUnread~SN story in feed: %s" % (usersub.feed))


    story = MStory.objects(story_feed_id=feed_id, story_guid=story_id)[0]
    m = MUserStory.objects(story=story, user_id=request.user.pk, feed_id=feed_id)

    if story.story_date < usersub.mark_read_date:
        # Story is outside the mark as read range, so invert all stories before.
        newer_stories = MStory.objects(story_feed_id=story.story_feed_id,
                                       story_date__gte=story.story_date,
                                       story_date__lte=usersub.mark_read_date
                                       ).only('story_guid')
        newer_stories = [s.story_guid for s in newer_stories]
        usersub.mark_read_date = story.story_date - datetime.timedelta(minutes=1)
        usersub.needs_unread_recalc = True
        usersub.save()

        # Mark stories as read only after the mark_read_date has been moved, otherwise
        # these would be ignored.
        data = usersub.mark_story_ids_as_read(newer_stories, request=request)

    m = MUserStory.objects(story_id=story_id, user_id=request.user.pk, feed_id=feed_id)
    m.delete()

    return data
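The branch above is the subtle part of mark_story_as_unread: when the selected story is older than the subscription's mark_read_date, the cutoff is rolled back to one minute before that story, and every story dated between the two is explicitly re-marked as read, so only the selected story flips to unread. A small worked example of the date window, with made-up dates:

import datetime

mark_read_date = datetime.datetime(2011, 11, 1)    # current cutoff
story_date = datetime.datetime(2011, 10, 20)       # story being un-read

if story_date < mark_read_date:
    # Every story dated between story_date and mark_read_date would flip to
    # unread once the cutoff moves, so those get explicitly marked read first.
    new_mark_read_date = story_date - datetime.timedelta(minutes=1)

print(new_mark_read_date)  # 2011-10-19 23:59:00, one minute before the story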
@@ -742,7 +745,8 @@ def mark_feed_as_read(request):

    us = UserSubscription.objects.get(feed=feed, user=request.user)
    try:
        us.mark_feed_read()
        if us:
            us.mark_feed_read()
    except IntegrityError:
        code = -1
    else:
@@ -859,6 +863,30 @@ def rename_folder(request):

    return dict(code=1)

@ajax_login_required
@json.json_view
def move_feed_to_folder(request):
    feed_id = int(request.POST['feed_id'])
    in_folder = request.POST.get('in_folder', '')
    to_folder = request.POST.get('to_folder', '')

    user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
    user_sub_folders = user_sub_folders.move_feed_to_folder(feed_id, in_folder=in_folder, to_folder=to_folder)

    return dict(code=1, folders=json.decode(user_sub_folders.folders))

@ajax_login_required
@json.json_view
def move_folder_to_folder(request):
    folder_name = request.POST['folder_name']
    in_folder = request.POST.get('in_folder', '')
    to_folder = request.POST.get('to_folder', '')

    user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
    user_sub_folders = user_sub_folders.move_folder_to_folder(folder_name, in_folder=in_folder, to_folder=to_folder)

    return dict(code=1, folders=json.decode(user_sub_folders.folders))

@login_required
def add_feature(request):
    if not request.user.is_staff:
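For completeness, the two move views above are plain POST endpoints; the JavaScript counterparts appear in the AssetModel changes further down. A hypothetical direct call, under the same assumptions as the earlier requests sketch:

# Assumes an authenticated requests.Session() as in the earlier sketch.
def move_feed_to_folder(session, base_url, feed_id, in_folder, to_folder):
    response = session.post(base_url + "/reader/move_feed_to_folder", data={
        'feed_id': feed_id,
        'in_folder': in_folder,   # '' for the top level
        'to_folder': to_folder,
    })
    return response.json()  # {"code": 1, "folders": [...]} on success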
@@ -7,6 +7,7 @@ from utils import feed_fetcher
from utils.management_functions import daemonize
import socket
import datetime
import redis


class Command(BaseCommand):
@@ -63,6 +64,12 @@

        options['compute_scores'] = True

        import pymongo
        db = pymongo.Connection(settings.MONGODB_SLAVE['host'], slave_okay=True, replicaset='nbset').newsblur

        options['slave_db'] = db

        disp = feed_fetcher.Dispatcher(options, num_workers)

        feeds_queue = []
@@ -4,11 +4,11 @@ import random
import re
import math
import mongoengine as mongo
import redis
import zlib
import urllib
from collections import defaultdict
from operator import itemgetter
from BeautifulSoup import BeautifulStoneSoup
# from nltk.collocations import TrigramCollocationFinder, BigramCollocationFinder, TrigramAssocMeasures, BigramAssocMeasures
from django.db import models
from django.db import IntegrityError
@@ -550,7 +550,7 @@ class Feed(models.Model):
        self.data.feed_classifier_counts = json.encode(scores)
        self.data.save()

    def update(self, force=False, single_threaded=True, compute_scores=True):
    def update(self, force=False, single_threaded=True, compute_scores=True, slave_db=None):
        from utils import feed_fetcher
        try:
            self.feed_address = self.feed_address % {'NEWSBLUR_DIR': settings.NEWSBLUR_DIR}
@@ -566,6 +566,7 @@
            'single_threaded': single_threaded,
            'force': force,
            'compute_scores': compute_scores,
            'slave_db': slave_db,
        }
        disp = feed_fetcher.Dispatcher(options, 1)
        disp.add_jobs([[self.pk]])
@@ -624,6 +625,7 @@
                # logging.debug('- Updated story in feed (%s - %s): %s / %s' % (self.feed_title, story.get('title'), len(existing_story.story_content), len(story_content)))
                story_guid = story.get('guid') or story.get('id') or story.get('link')
                original_content = None
                existing_story = MStory.objects.get(story_feed_id=existing_story.story_feed_id, story_guid=existing_story.story_guid)
                if existing_story.story_original_content_z:
                    original_content = zlib.decompress(existing_story.story_original_content_z)
                elif existing_story.story_content_z:
@@ -903,8 +905,8 @@
        # 2 subscribers:
        #   1 update per day = 1 hour
        #   10 updates = 20 minutes
        updates_per_day_delay = 12 * 60 / max(.25, ((max(0, self.active_subscribers)**.35)
                                                    * (updates_per_month**1.2)))
        updates_per_day_delay = 6 * 60 / max(.25, ((max(0, self.active_subscribers)**.2)
                                                   * (updates_per_month**0.25)))
        if self.premium_subscribers > 0:
            updates_per_day_delay /= min(self.active_subscribers+self.premium_subscribers, 5)
        # Lots of subscribers = lots of updates
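The change above swaps the exponents in the update-delay formula (0.35 to 0.2 on active subscribers, 1.2 to 0.25 on updates per month) and halves the base from 12 hours to 6. Evaluating both expressions for a feed with 2 active subscribers shows the effect: busy feeds are polled far less aggressively, while very quiet feeds actually get a somewhat shorter delay. The numbers below are just the two expressions evaluated in isolation, ignoring the premium-subscriber adjustment that follows them.

def old_delay(active_subscribers, updates_per_month):
    return 12 * 60 / max(.25, ((max(0, active_subscribers)**.35)
                               * (updates_per_month**1.2)))

def new_delay(active_subscribers, updates_per_month):
    return 6 * 60 / max(.25, ((max(0, active_subscribers)**.2)
                              * (updates_per_month**0.25)))

for updates_per_month in (1, 30, 300):
    print(updates_per_month,
          round(old_delay(2, updates_per_month), 1),
          round(new_delay(2, updates_per_month), 1))
# approximately: 1/month   -> 565 vs 313 minutes
#                30/month  -> 9.5 vs 134 minutes
#                300/month -> 0.6 vs 75 minutes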
@@ -1,5 +1,6 @@
from celery.task import Task
from utils import log as logging
from django.conf import settings

class UpdateFeeds(Task):
    name = 'update-feeds'
@@ -11,10 +12,13 @@ class UpdateFeeds(Task):
        if not isinstance(feed_pks, list):
            feed_pks = [feed_pks]

        import pymongo
        db = pymongo.Connection(settings.MONGODB_SLAVE['host'], slave_okay=True).newsblur

        for feed_pk in feed_pks:
            try:
                feed = Feed.objects.get(pk=feed_pk)
                feed.update()
                feed.update(slave_db=db)
            except Feed.DoesNotExist:
                logging.info(" ---> Feed doesn't exist: [%s]" % feed_pk)
            # logging.debug(' Updating: [%s] %s' % (feed_pks, feed))
config/redis-init (new file, 42 lines)
@@ -0,0 +1,42 @@
#!/bin/sh
#
# Simple Redis init.d script conceived to work on Linux systems
# as it does use of the /proc filesystem.

REDISPORT=6379
EXEC=/usr/local/bin/redis-server
CLIEXEC=/usr/local/bin/redis-cli

PIDFILE=/var/log/redis.pid
CONF="/etc/redis.conf"

case "$1" in
    start)
        if [ -f $PIDFILE ]
        then
            echo "$PIDFILE exists, process is already running or crashed"
        else
            echo "Starting Redis server..."
            $EXEC $CONF
        fi
        ;;
    stop)
        if [ ! -f $PIDFILE ]
        then
            echo "$PIDFILE does not exist, process is not running"
        else
            PID=$(cat $PIDFILE)
            echo "Stopping ..."
            $CLIEXEC -p $REDISPORT shutdown
            while [ -x /proc/${PID} ]
            do
                echo "Waiting for Redis to shutdown ..."
                sleep 1
            done
            echo "Redis stopped"
        fi
        ;;
    *)
        echo "Please use start or stop as first argument"
        ;;
esac
config/redis.conf (new file, 465 lines)

@@ -0,0 +1,465 @@
# Redis configuration file example
|
||||
|
||||
# Note on units: when memory size is needed, it is possible to specify
|
||||
# it in the usual form of 1k 5GB 4M and so forth:
|
||||
#
|
||||
# 1k => 1000 bytes
|
||||
# 1kb => 1024 bytes
|
||||
# 1m => 1000000 bytes
|
||||
# 1mb => 1024*1024 bytes
|
||||
# 1g => 1000000000 bytes
|
||||
# 1gb => 1024*1024*1024 bytes
|
||||
#
|
||||
# units are case insensitive so 1GB 1Gb 1gB are all the same.
|
||||
|
||||
# By default Redis does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
|
||||
daemonize yes
|
||||
|
||||
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
|
||||
# default. You can specify a custom pid file location here.
|
||||
pidfile /var/log/redis.pid
|
||||
|
||||
# Accept connections on the specified port, default is 6379.
|
||||
# If port 0 is specified Redis will not listen on a TCP socket.
|
||||
port 6379
|
||||
|
||||
# If you want you can bind a single interface, if the bind option is not
|
||||
# specified all the interfaces will listen for incoming connections.
|
||||
#
|
||||
# bind 127.0.0.1
|
||||
|
||||
# Specify the path for the unix socket that will be used to listen for
|
||||
# incoming connections. There is no default, so Redis will not listen
|
||||
# on a unix socket when not specified.
|
||||
#
|
||||
# unixsocket /tmp/redis.sock
|
||||
# unixsocketperm 755
|
||||
|
||||
# Close the connection after a client is idle for N seconds (0 to disable)
|
||||
timeout 300
|
||||
|
||||
# Set server verbosity to 'debug'
|
||||
# it can be one of:
|
||||
# debug (a lot of information, useful for development/testing)
|
||||
# verbose (many rarely useful info, but not a mess like the debug level)
|
||||
# notice (moderately verbose, what you want in production probably)
|
||||
# warning (only very important / critical messages are logged)
|
||||
loglevel verbose
|
||||
|
||||
# Specify the log file name. Also 'stdout' can be used to force
|
||||
# Redis to log on the standard output. Note that if you use standard
|
||||
# output for logging but daemonize, logs will be sent to /dev/null
|
||||
logfile /var/log/redis.log
|
||||
|
||||
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
|
||||
# and optionally update the other syslog parameters to suit your needs.
|
||||
# syslog-enabled no
|
||||
|
||||
# Specify the syslog identity.
|
||||
# syslog-ident redis
|
||||
|
||||
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
|
||||
# syslog-facility local0
|
||||
|
||||
# Set the number of databases. The default database is DB 0, you can select
|
||||
# a different one on a per-connection basis using SELECT <dbid> where
|
||||
# dbid is a number between 0 and 'databases'-1
|
||||
databases 16
|
||||
|
||||
################################ SNAPSHOTTING #################################
|
||||
#
|
||||
# Save the DB on disk:
|
||||
#
|
||||
# save <seconds> <changes>
|
||||
#
|
||||
# Will save the DB if both the given number of seconds and the given
|
||||
# number of write operations against the DB occurred.
|
||||
#
|
||||
# In the example below the behaviour will be to save:
|
||||
# after 900 sec (15 min) if at least 1 key changed
|
||||
# after 300 sec (5 min) if at least 10 keys changed
|
||||
# after 60 sec if at least 10000 keys changed
|
||||
#
|
||||
# Note: you can disable saving at all commenting all the "save" lines.
|
||||
|
||||
save 900 1
|
||||
save 300 10
|
||||
save 60 10000
|
||||
|
||||
# Compress string objects using LZF when dump .rdb databases?
|
||||
# For default that's set to 'yes' as it's almost always a win.
|
||||
# If you want to save some CPU in the saving child set it to 'no' but
|
||||
# the dataset will likely be bigger if you have compressible values or keys.
|
||||
rdbcompression yes
|
||||
|
||||
# The filename where to dump the DB
|
||||
dbfilename dump.rdb
|
||||
|
||||
# The working directory.
|
||||
#
|
||||
# The DB will be written inside this directory, with the filename specified
|
||||
# above using the 'dbfilename' configuration directive.
|
||||
#
|
||||
# Also the Append Only File will be created inside this directory.
|
||||
#
|
||||
# Note that you must specify a directory here, not a file name.
|
||||
dir /var/lib/redis
|
||||
|
||||
################################# REPLICATION #################################
|
||||
|
||||
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
|
||||
# another Redis server. Note that the configuration is local to the slave
|
||||
# so for example it is possible to configure the slave to save the DB with a
|
||||
# different interval, or to listen to another port, and so on.
|
||||
#
|
||||
# slaveof <masterip> <masterport>
|
||||
|
||||
# If the master is password protected (using the "requirepass" configuration
|
||||
# directive below) it is possible to tell the slave to authenticate before
|
||||
# starting the replication synchronization process, otherwise the master will
|
||||
# refuse the slave request.
|
||||
#
|
||||
# masterauth <master-password>
|
||||
|
||||
# When a slave lost the connection with the master, or when the replication
|
||||
# is still in progress, the slave can act in two different ways:
|
||||
#
|
||||
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
|
||||
# still reply to client requests, possibly with out of date data, or the
|
||||
# data set may just be empty if this is the first synchronization.
|
||||
#
|
||||
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
|
||||
# an error "SYNC with master in progress" to all the kind of commands
|
||||
# but to INFO and SLAVEOF.
|
||||
#
|
||||
slave-serve-stale-data yes
|
||||
|
||||
################################## SECURITY ###################################
|
||||
|
||||
# Require clients to issue AUTH <PASSWORD> before processing any other
|
||||
# commands. This might be useful in environments in which you do not trust
|
||||
# others with access to the host running redis-server.
|
||||
#
|
||||
# This should stay commented out for backward compatibility and because most
|
||||
# people do not need auth (e.g. they run their own servers).
|
||||
#
|
||||
# Warning: since Redis is pretty fast an outside user can try up to
|
||||
# 150k passwords per second against a good box. This means that you should
|
||||
# use a very strong password otherwise it will be very easy to break.
|
||||
#
|
||||
# requirepass foobared
|
||||
|
||||
# Command renaming.
|
||||
#
|
||||
# It is possible to change the name of dangerous commands in a shared
|
||||
# environment. For instance the CONFIG command may be renamed into something
|
||||
# of hard to guess so that it will be still available for internal-use
|
||||
# tools but not available for general clients.
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
|
||||
#
|
||||
# It is also possible to completely kill a command renaming it into
|
||||
# an empty string:
|
||||
#
|
||||
# rename-command CONFIG ""
|
||||
|
||||
################################### LIMITS ####################################
|
||||
|
||||
# Set the max number of connected clients at the same time. By default there
|
||||
# is no limit, and it's up to the number of file descriptors the Redis process
|
||||
# is able to open. The special value '0' means no limits.
|
||||
# Once the limit is reached Redis will close all the new connections sending
|
||||
# an error 'max number of clients reached'.
|
||||
#
|
||||
# maxclients 128
|
||||
|
||||
# Don't use more memory than the specified amount of bytes.
|
||||
# When the memory limit is reached Redis will try to remove keys with an
|
||||
# EXPIRE set. It will try to start freeing keys that are going to expire
|
||||
# in little time and preserve keys with a longer time to live.
|
||||
# Redis will also try to remove objects from free lists if possible.
|
||||
#
|
||||
# If all this fails, Redis will start to reply with errors to commands
|
||||
# that will use more memory, like SET, LPUSH, and so on, and will continue
|
||||
# to reply to most read-only commands like GET.
|
||||
#
|
||||
# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
|
||||
# 'state' server or cache, not as a real DB. When Redis is used as a real
|
||||
# database the memory usage will grow over the weeks, it will be obvious if
|
||||
# it is going to use too much memory in the long run, and you'll have the time
|
||||
# to upgrade. With maxmemory after the limit is reached you'll start to get
|
||||
# errors for write operations, and this may even lead to DB inconsistency.
|
||||
#
|
||||
# maxmemory <bytes>
|
||||
|
||||
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
|
||||
# is reached? You can select among five behavior:
|
||||
#
|
||||
# volatile-lru -> remove the key with an expire set using an LRU algorithm
|
||||
# allkeys-lru -> remove any key accordingly to the LRU algorithm
|
||||
# volatile-random -> remove a random key with an expire set
|
||||
# allkeys->random -> remove a random key, any key
|
||||
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
|
||||
# noeviction -> don't expire at all, just return an error on write operations
|
||||
#
|
||||
# Note: with all the kind of policies, Redis will return an error on write
|
||||
# operations, when there are not suitable keys for eviction.
|
||||
#
|
||||
# At the date of writing this commands are: set setnx setex append
|
||||
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
|
||||
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
|
||||
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
|
||||
# getset mset msetnx exec sort
|
||||
#
|
||||
# The default is:
|
||||
#
|
||||
# maxmemory-policy volatile-lru
|
||||
|
||||
# LRU and minimal TTL algorithms are not precise algorithms but approximated
|
||||
# algorithms (in order to save memory), so you can select as well the sample
|
||||
# size to check. For instance for default Redis will check three keys and
|
||||
# pick the one that was used less recently, you can change the sample size
|
||||
# using the following configuration directive.
|
||||
#
|
||||
# maxmemory-samples 3
|
||||
|
||||
############################## APPEND ONLY MODE ###############################
|
||||
|
||||
# By default Redis asynchronously dumps the dataset on disk. If you can live
|
||||
# with the idea that the latest records will be lost if something like a crash
|
||||
# happens this is the preferred way to run Redis. If instead you care a lot
|
||||
# about your data and don't want to that a single record can get lost you should
|
||||
# enable the append only mode: when this mode is enabled Redis will append
|
||||
# every write operation received in the file appendonly.aof. This file will
|
||||
# be read on startup in order to rebuild the full dataset in memory.
|
||||
#
|
||||
# Note that you can have both the async dumps and the append only file if you
|
||||
# like (you have to comment the "save" statements above to disable the dumps).
|
||||
# Still if append only mode is enabled Redis will load the data from the
|
||||
# log file at startup ignoring the dump.rdb file.
|
||||
#
|
||||
# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
|
||||
# log file in background when it gets too big.
|
||||
|
||||
appendonly no
|
||||
|
||||
# The name of the append only file (default: "appendonly.aof")
|
||||
# appendfilename appendonly.aof
|
||||
|
||||
# The fsync() call tells the Operating System to actually write data on disk
|
||||
# instead to wait for more data in the output buffer. Some OS will really flush
|
||||
# data on disk, some other OS will just try to do it ASAP.
|
||||
#
|
||||
# Redis supports three different modes:
|
||||
#
|
||||
# no: don't fsync, just let the OS flush the data when it wants. Faster.
|
||||
# always: fsync after every write to the append only log . Slow, Safest.
|
||||
# everysec: fsync only if one second passed since the last fsync. Compromise.
|
||||
#
|
||||
# The default is "everysec" that's usually the right compromise between
|
||||
# speed and data safety. It's up to you to understand if you can relax this to
|
||||
# "no" that will will let the operating system flush the output buffer when
|
||||
# it wants, for better performances (but if you can live with the idea of
|
||||
# some data loss consider the default persistence mode that's snapshotting),
|
||||
# or on the contrary, use "always" that's very slow but a bit safer than
|
||||
# everysec.
|
||||
#
|
||||
# If unsure, use "everysec".
|
||||
|
||||
# appendfsync always
|
||||
appendfsync everysec
|
||||
# appendfsync no
|
||||
|
||||
# When the AOF fsync policy is set to always or everysec, and a background
|
||||
# saving process (a background save or AOF log background rewriting) is
|
||||
# performing a lot of I/O against the disk, in some Linux configurations
|
||||
# Redis may block too long on the fsync() call. Note that there is no fix for
|
||||
# this currently, as even performing fsync in a different thread will block
|
||||
# our synchronous write(2) call.
|
||||
#
|
||||
# In order to mitigate this problem it's possible to use the following option
|
||||
# that will prevent fsync() from being called in the main process while a
|
||||
# BGSAVE or BGREWRITEAOF is in progress.
|
||||
#
|
||||
# This means that while another child is saving the durability of Redis is
|
||||
# the same as "appendfsync none", that in practical terms means that it is
|
||||
# possible to lose up to 30 seconds of log in the worst scenario (with the
|
||||
# default Linux settings).
|
||||
#
|
||||
# If you have latency problems turn this to "yes". Otherwise leave it as
|
||||
# "no" that is the safest pick from the point of view of durability.
|
||||
no-appendfsync-on-rewrite no
|
||||
|
||||
# Automatic rewrite of the append only file.
|
||||
# Redis is able to automatically rewrite the log file implicitly calling
|
||||
# BGREWRITEAOF when the AOF log size will growth by the specified percentage.
|
||||
#
|
||||
# This is how it works: Redis remembers the size of the AOF file after the
|
||||
# latest rewrite (or if no rewrite happened since the restart, the size of
|
||||
# the AOF at startup is used).
|
||||
#
|
||||
# This base size is compared to the current size. If the current size is
|
||||
# bigger than the specified percentage, the rewrite is triggered. Also
|
||||
# you need to specify a minimal size for the AOF file to be rewritten, this
|
||||
# is useful to avoid rewriting the AOF file even if the percentage increase
|
||||
# is reached but it is still pretty small.
|
||||
#
|
||||
# Specify a percentage of zero in order to disable the automatic AOF
|
||||
# rewrite feature.
|
||||
|
||||
auto-aof-rewrite-percentage 100
|
||||
auto-aof-rewrite-min-size 64mb
|
||||
|
||||
################################## SLOW LOG ###################################
|
||||
|
||||
# The Redis Slow Log is a system to log queries that exceeded a specified
|
||||
# execution time. The execution time does not include the I/O operations
|
||||
# like talking with the client, sending the reply and so forth,
|
||||
# but just the time needed to actually execute the command (this is the only
|
||||
# stage of command execution where the thread is blocked and can not serve
|
||||
# other requests in the meantime).
|
||||
#
|
||||
# You can configure the slow log with two parameters: one tells Redis
|
||||
# what is the execution time, in microseconds, to exceed in order for the
|
||||
# command to get logged, and the other parameter is the length of the
|
||||
# slow log. When a new command is logged the oldest one is removed from the
|
||||
# queue of logged commands.
|
||||
|
||||
# The following time is expressed in microseconds, so 1000000 is equivalent
|
||||
# to one second. Note that a negative number disables the slow log, while
|
||||
# a value of zero forces the logging of every command.
|
||||
slowlog-log-slower-than 10000
|
||||
|
||||
# There is no limit to this length. Just be aware that it will consume memory.
|
||||
# You can reclaim memory used by the slow log with SLOWLOG RESET.
|
||||
slowlog-max-len 1024
|
||||
|
||||
################################ VIRTUAL MEMORY ###############################
|
||||
|
||||
### WARNING! Virtual Memory is deprecated in Redis 2.4
|
||||
### The use of Virtual Memory is strongly discouraged.
|
||||
|
||||
# Virtual Memory allows Redis to work with datasets bigger than the actual
|
||||
# amount of RAM needed to hold the whole dataset in memory.
|
||||
# In order to do so very used keys are taken in memory while the other keys
|
||||
# are swapped into a swap file, similarly to what operating systems do
|
||||
# with memory pages.
|
||||
#
|
||||
# To enable VM just set 'vm-enabled' to yes, and set the following three
|
||||
# VM parameters accordingly to your needs.
|
||||
|
||||
vm-enabled no
|
||||
# vm-enabled yes
|
||||
|
||||
# This is the path of the Redis swap file. As you can guess, swap files
|
||||
# can't be shared by different Redis instances, so make sure to use a swap
|
||||
# file for every redis process you are running. Redis will complain if the
|
||||
# swap file is already in use.
|
||||
#
|
||||
# The best kind of storage for the Redis swap file (that's accessed at random)
|
||||
# is a Solid State Disk (SSD).
|
||||
#
|
||||
# *** WARNING *** if you are using a shared hosting the default of putting
|
||||
# the swap file under /tmp is not secure. Create a dir with access granted
|
||||
# only to Redis user and configure Redis to create the swap file there.
|
||||
vm-swap-file /tmp/redis.swap
|
||||
|
||||
# vm-max-memory configures the VM to use at max the specified amount of
|
||||
# RAM. Everything that does not fit will be swapped on disk *if* possible, that
|
||||
# is, if there is still enough contiguous space in the swap file.
|
||||
#
|
||||
# With vm-max-memory 0 the system will swap everything it can. Not a good
|
||||
# default, just specify the max amount of RAM you can in bytes, but it's
|
||||
# better to leave some margin. For instance specify an amount of RAM
|
||||
# that's more or less between 60 and 80% of your free RAM.
|
||||
vm-max-memory 0
|
||||
|
||||
# Redis swap files is split into pages. An object can be saved using multiple
|
||||
# contiguous pages, but pages can't be shared between different objects.
|
||||
# So if your page is too big, small objects swapped out on disk will waste
|
||||
# a lot of space. If you page is too small, there is less space in the swap
|
||||
# file (assuming you configured the same number of total swap file pages).
|
||||
#
|
||||
# If you use a lot of small objects, use a page size of 64 or 32 bytes.
|
||||
# If you use a lot of big objects, use a bigger page size.
|
||||
# If unsure, use the default :)
|
||||
vm-page-size 32
|
||||
|
||||
# Number of total memory pages in the swap file.
|
||||
# Given that the page table (a bitmap of free/used pages) is taken in memory,
|
||||
# every 8 pages on disk will consume 1 byte of RAM.
|
||||
#
|
||||
# The total swap size is vm-page-size * vm-pages
|
||||
#
|
||||
# With the default of 32-bytes memory pages and 134217728 pages Redis will
|
||||
# use a 4 GB swap file, that will use 16 MB of RAM for the page table.
|
||||
#
|
||||
# It's better to use the smallest acceptable value for your application,
|
||||
# but the default is large in order to work in most conditions.
|
||||
vm-pages 134217728
|
||||
|
||||
# Max number of VM I/O threads running at the same time.
|
||||
# This threads are used to read/write data from/to swap file, since they
|
||||
# also encode and decode objects from disk to memory or the reverse, a bigger
|
||||
# number of threads can help with big objects even if they can't help with
|
||||
# I/O itself as the physical device may not be able to couple with many
|
||||
# reads/writes operations at the same time.
|
||||
|
||||
# Hashes are encoded in a special way (much more memory efficient) when they
|
||||
# have at max a given number of elements, and the biggest element does not
|
||||
# exceed a given threshold. You can configure this limits with the following
|
||||
# configuration directives.
|
||||
hash-max-zipmap-entries 512
|
||||
hash-max-zipmap-value 64
|
||||
|
||||
# Similarly to hashes, small lists are also encoded in a special way in order
|
||||
# to save a lot of space. The special representation is only used when
|
||||
# you are under the following limits:
|
||||
list-max-ziplist-entries 512
|
||||
list-max-ziplist-value 64
|
||||
|
||||
# Sets have a special encoding in just one case: when a set is composed
|
||||
# of just strings that happens to be integers in radix 10 in the range
|
||||
# of 64 bit signed integers.
|
||||
# The following configuration setting sets the limit in the size of the
|
||||
# set in order to use this special memory saving encoding.
|
||||
set-max-intset-entries 512
|
||||
|
||||
# Similarly to hashes and lists, sorted sets are also specially encoded in
|
||||
# order to save a lot of space. This encoding is only used when the length and
|
||||
# elements of a sorted set are below the following limits:
|
||||
zset-max-ziplist-entries 128
|
||||
zset-max-ziplist-value 64
|
||||
|
||||
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
|
||||
# order to help rehashing the main Redis hash table (the one mapping top-level
|
||||
# keys to values). The hash table implementation redis uses (see dict.c)
|
||||
# performs a lazy rehashing: the more operation you run into an hash table
|
||||
# that is rehashing, the more rehashing "steps" are performed, so if the
|
||||
# server is idle the rehashing is never complete and some more memory is used
|
||||
# by the hash table.
|
||||
#
|
||||
# The default is to use this millisecond 10 times every second in order to
|
||||
# active rehashing the main dictionaries, freeing memory when possible.
|
||||
#
|
||||
# If unsure:
|
||||
# use "activerehashing no" if you have hard latency requirements and it is
|
||||
# not a good thing in your environment that Redis can reply form time to time
|
||||
# to queries with 2 milliseconds delay.
|
||||
#
|
||||
# use "activerehashing yes" if you don't have such hard requirements but
|
||||
# want to free memory asap when possible.
|
||||
activerehashing yes
|
||||
|
||||
################################## INCLUDES ###################################
|
||||
|
||||
# Include one or more other config files here. This is useful if you
|
||||
# have a standard template that goes to all redis server but also need
|
||||
# to customize a few per-server settings. Include files can include
|
||||
# other files, so use this wisely.
|
||||
#
|
||||
# include /path/to/local.conf
|
||||
# include /path/to/other.conf
|
|
@@ -6,6 +6,8 @@ export ZSH=$HOME/.oh-my-zsh
# Look in ~/.oh-my-zsh/themes/
export ZSH_THEME="risto"

export DISABLE_AUTO_UPDATE="true"

# Set to this to use case-sensitive completion
export CASE_SENSITIVE="true"
export LC_COLLATE='C'
fabfile.py (vendored, 32 lines)
@@ -206,9 +206,11 @@ def setup_db():
    setup_db_firewall()
    setup_db_motd()
    setup_rabbitmq()
    setup_memcached()
    setup_postgres()
    setup_mongo()
    setup_gunicorn(supervisor=False)
    setup_redis()

def setup_task():
    setup_common()
@@ -226,7 +228,7 @@
def setup_installs():
    sudo('apt-get -y update')
    sudo('apt-get -y upgrade')
    sudo('apt-get -y install build-essential gcc scons libreadline-dev sysstat iotop git zsh python-dev locate python-software-properties libpcre3-dev libdbd-pg-perl libssl-dev make pgbouncer python-psycopg2 libmemcache0 memcached python-memcache libyaml-0-2 python-yaml python-numpy python-scipy python-imaging munin munin-node munin-plugins-extra curl ntp monit')
    sudo('apt-get -y install build-essential gcc scons libreadline-dev sysstat iotop git zsh python-dev locate python-software-properties libpcre3-dev libdbd-pg-perl libssl-dev make pgbouncer python-psycopg2 libmemcache0 python-memcache libyaml-0-2 python-yaml python-numpy python-scipy python-imaging munin munin-node munin-plugins-extra curl ntp monit')
    # sudo('add-apt-repository ppa:pitti/postgresql')
    sudo('apt-get -y update')
    sudo('apt-get -y install postgresql-client')
@@ -289,7 +291,7 @@ def setup_psycopg():

def setup_python():
    sudo('easy_install pip')
    sudo('easy_install fabric django celery django-celery django-compress South django-extensions pymongo BeautifulSoup pyyaml nltk==0.9.9 lxml oauth2 pytz boto seacucumber django_ses mongoengine')
    sudo('easy_install fabric django celery django-celery django-compress South django-extensions pymongo BeautifulSoup pyyaml nltk==0.9.9 lxml oauth2 pytz boto seacucumber django_ses mongoengine redis')

    put('config/pystartup.py', '.pystartup')
    with cd(os.path.join(env.NEWSBLUR_PATH, 'vendor/cjson')):
@@ -385,6 +387,12 @@ def configure_nginx():
    sudo("chmod 0755 /etc/init.d/nginx")
    sudo("/usr/sbin/update-rc.d -f nginx defaults")
    sudo("/etc/init.d/nginx restart")

def configure_node():
    sudo("apt-get install node")
    sudo("curl http://npmjs.org/install.sh | sudo sh")
    sudo("npm install -g redis")
    sudo("npm install -g socket.io")

# ===============
# = Setup - App =
@@ -437,6 +445,10 @@ def setup_db_firewall():
    sudo('ufw allow from 199.15.250.0/24 to any port 27017') # MongoDB
    sudo('ufw allow from 199.15.253.0/24 to any port 5672 ') # RabbitMQ
    sudo('ufw allow from 199.15.250.0/24 to any port 5672 ') # RabbitMQ
    sudo('ufw allow from 199.15.250.0/24 to any port 6379 ') # Redis
    sudo('ufw allow from 199.15.253.0/24 to any port 6379 ') # Redis
    sudo('ufw allow from 199.15.250.0/24 to any port 11211 ') # Memcached
    sudo('ufw allow from 199.15.253.0/24 to any port 11211 ') # Memcached
    sudo('ufw --force enable')

def setup_db_motd():
@@ -453,6 +465,9 @@ def setup_rabbitmq():
    sudo('rabbitmqctl add_vhost newsblurvhost')
    sudo('rabbitmqctl set_permissions -p newsblurvhost newsblur ".*" ".*" ".*"')

def setup_memcached():
    sudo('apt-get -y install memcached')

def setup_postgres():
    sudo('apt-get -y install postgresql postgresql-client postgresql-contrib libpq-dev')

@@ -462,6 +477,19 @@ def setup_mongo():
    sudo('echo "deb http://downloads-distro.mongodb.org/repo/debian-sysvinit dist 10gen" >> /etc/apt/sources.list')
    sudo('apt-get update')
    sudo('apt-get -y install mongodb-10gen')

def setup_redis():
    with cd(env.VENDOR_PATH):
        run('wget http://redis.googlecode.com/files/redis-2.4.2.tar.gz')
        run('tar -xzf redis-2.4.2.tar.gz')
        run('rm redis-2.4.2.tar.gz')
    with cd(os.path.join(env.VENDOR_PATH, 'redis-2.4.2')):
        sudo('make install')
    put('config/redis-init', '/etc/init.d/redis', use_sudo=True)
    sudo('chmod u+x /etc/init.d/redis')
    put('config/redis.conf', '/etc/redis.conf', use_sudo=True)
    sudo('mkdir -p /var/lib/redis')
    sudo('update-rc.d redis defaults')

# ================
# = Setup - Task =
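Once setup_redis() and the firewall rules above are in place, the redis-py client that setup_python() now installs gives a quick way to confirm the app servers can reach the new Redis instance. A minimal check, where the host name is a placeholder:

import redis

# 'db01' is a placeholder for whatever host the fabfile provisioned.
r = redis.Redis(host='db01', port=6379, db=0)
print(r.ping())              # True if the server is reachable
r.set('newsblur:ping', 1)
print(r.get('newsblur:ping'))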
@@ -4174,9 +4174,6 @@ background: transparent;
    background: transparent url('../img/icons/silk/exclamation.png') no-repeat 0 1px;
    font-weight: bold;
}
.NB-menu-manage .NB-menu-manage-move {
    display: none;
}
.NB-menu-manage .NB-menu-manage-move .NB-menu-manage-image {
    background: transparent url('../img/icons/silk/arrow_branch.png') no-repeat 0px 2px;
}
@@ -4285,6 +4282,12 @@ background: transparent;
.NB-menu-manage .NB-menu-manage-story-thirdparty .NB-menu-manage-thirdparty-readitlater {
    background: transparent url('../img/reader/readitlater.png') no-repeat 0 0;
}
.NB-menu-manage .NB-menu-manage-story-thirdparty .NB-menu-manage-thirdparty-pinboard {
    background: transparent url('../img/reader/pinboard.png') no-repeat 0 0;
}
.NB-menu-manage .NB-menu-manage-story-thirdparty .NB-menu-manage-thirdparty-googleplus {
    background: transparent url('../img/reader/googleplus.png') no-repeat 0 0;
}
.NB-menu-manage .NB-menu-manage-story-thirdparty .NB-menu-manage-thirdparty-readability {
    background: transparent url('../img/reader/readability.png') no-repeat 0 0;
}
@@ -4319,6 +4322,20 @@ background: transparent;
.NB-menu-manage .NB-menu-manage-story-thirdparty.NB-menu-manage-highlight-readitlater .NB-menu-manage-thirdparty-readitlater {
    opacity: 1;
}
.NB-menu-manage .NB-menu-manage-story-thirdparty.NB-menu-manage-highlight-pinboard .NB-menu-manage-image,
.NB-menu-manage .NB-menu-manage-story-thirdparty.NB-menu-manage-highlight-pinboard .NB-menu-manage-thirdparty-icon {
    opacity: .2;
}
.NB-menu-manage .NB-menu-manage-story-thirdparty.NB-menu-manage-highlight-pinboard .NB-menu-manage-thirdparty-pinboard {
    opacity: 1;
}
.NB-menu-manage .NB-menu-manage-story-thirdparty.NB-menu-manage-highlight-googleplus .NB-menu-manage-image,
.NB-menu-manage .NB-menu-manage-story-thirdparty.NB-menu-manage-highlight-googleplus .NB-menu-manage-thirdparty-icon {
    opacity: .2;
}
.NB-menu-manage .NB-menu-manage-story-thirdparty.NB-menu-manage-highlight-googleplus .NB-menu-manage-thirdparty-googleplus {
    opacity: 1;
}
.NB-menu-manage .NB-menu-manage-story-thirdparty.NB-menu-manage-highlight-email .NB-menu-manage-image,
.NB-menu-manage .NB-menu-manage-story-thirdparty.NB-menu-manage-highlight-email .NB-menu-manage-thirdparty-icon {
    opacity: .2;
@@ -5487,6 +5504,12 @@ background: transparent;
.NB-modal-preferences .NB-preference-story-share label[for=NB-preference-story-share-readitlater] {
    background: transparent url('../img/reader/readitlater.png') no-repeat 0 0;
}
.NB-modal-preferences .NB-preference-story-share label[for=NB-preference-story-share-pinboard] {
    background: transparent url('../img/reader/pinboard.png') no-repeat 0 0;
}
.NB-modal-preferences .NB-preference-story-share label[for=NB-preference-story-share-googleplus] {
    background: transparent url('../img/reader/googleplus.png') no-repeat 0 0;
}
.NB-modal-preferences .NB-preference-story-share label[for=NB-preference-story-share-email] {
    background: transparent url('../img/reader/email.png') no-repeat 0 0;
}
media/img/reader/googleplus.png (new binary file, 1.1 KiB)
media/img/reader/pinboard.png (new binary file, 734 B)
@@ -21,6 +21,8 @@

#define kTableViewRowHeight 65;
#define kTableViewRiverRowHeight 81;
#define kMarkReadActionSheet 1;
#define kSettingsActionSheet 2;

@implementation FeedDetailViewController

@@ -613,6 +615,10 @@
    }
}

- (void)markFeedsReadWithAllStories:(BOOL)includeHidden {

}

- (IBAction)doOpenMarkReadActionSheet:(id)sender {
    UIActionSheet *options = [[UIActionSheet alloc]
                              initWithTitle:appDelegate.activeFolder
@@ -621,17 +627,18 @@
                              destructiveButtonTitle:nil
                              otherButtonTitles:nil];

    int storyCount = [appDelegate storyCount];
    NSString *visibleText = [NSString stringWithFormat:@"Mark %@ %d stor%@ read",
                             storyCount == 1 ? @"this" : @"these",
                             storyCount,
                             storyCount == 1 ? @"y" : @"ies"];
    int unreadCount = [[appDelegate activeFeedStoryLocations] count];
    NSString *visibleText = [NSString stringWithFormat:@"Mark %@ read",
                             unreadCount == 1 ?
                             @"this story as" :
                             [NSString stringWithFormat:@"these %d stories", unreadCount]];
    NSArray *buttonTitles = [NSArray arrayWithObjects:visibleText, @"Mark entire folder read", nil];
    for (id title in buttonTitles) {
        [options addButtonWithTitle:title];
    }
    options.cancelButtonIndex = [options addButtonWithTitle:@"Cancel"];

    options.tag = kMarkReadActionSheet;
    [options showInView:self.view];
    [options release];
}
@@ -650,13 +657,22 @@
    }
    options.cancelButtonIndex = [options addButtonWithTitle:@"Cancel"];

    options.tag = kSettingsActionSheet;
    [options showInView:self.view];
    [options release];
}

- (void)actionSheet:(UIActionSheet *)actionSheet clickedButtonAtIndex:(NSInteger)buttonIndex {
    if (buttonIndex == 0) {
        [self confirmDeleteSite];
    if (actionSheet.tag == 1) {
        if (buttonIndex == 0) {
            [self markFeedsReadWithAllStories:NO];
        } else if (buttonIndex == 1) {
            [self markFeedsReadWithAllStories:YES];
        }
    } else if (actionSheet.tag == 2) {
        if (buttonIndex == 0) {
            [self confirmDeleteSite];
        }
    }
}
@ -15,7 +15,7 @@
|
|||
// #define BACKGROUND_REFRESH_SECONDS -5
|
||||
#define BACKGROUND_REFRESH_SECONDS -10*60
|
||||
|
||||
// #define NEWSBLUR_URL [NSString stringWithFormat:@"nb.local.host:8000"]
|
||||
#define NEWSBLUR_URL [NSString stringWithFormat:@"www.newsblur.com"]
|
||||
#define NEWSBLUR_URL [NSString stringWithFormat:@"nb.local.host:8000"]
|
||||
// #define NEWSBLUR_URL [NSString stringWithFormat:@"www.newsblur.com"]
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -5,6 +5,8 @@ if (typeof NEWSBLUR.Globals == 'undefined') NEWSBLUR.Globals = {};
/* = Core NewsBlur Javascript = */
/* ============================= */

var URL_REGEX = /((https?\:\/\/)|(www\.))(\S+)(\w{2,4})(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?/gi;

NEWSBLUR.log = function(msg) {
    try {
        if (typeof o == "object")

@@ -31,6 +33,36 @@ NEWSBLUR.log = function(msg) {

$.fn.extend({

    autolink: function() {
        return this.each(function(){
            var desc = $(this);
            desc.textNodes().each(function(){
                var text = $(this);
                if (text && text.parent() && text.parent()[0] && text.parent()[0].nodeName != 'A') {
                    text.replaceWith(this.data.replace(URL_REGEX, function($0, $1) {
                        return '<a href="' + $0 + '">' + $0 + '</a>';
                    }));
                }
            });
        });
    },

    textNodes: function() {
        var ret = [];

        (function(el){
            if (!el) return;
            if ((el.nodeType == 3)) {
                ret.push(el);
            } else {
                for (var i=0; i < el.childNodes.length; ++i) {
                    arguments.callee(el.childNodes[i]);
                }
            }
        })(this[0]);
        return $(ret);
    },

    isScrollVisible: function($elem) {
        var docViewTop = 0; // $(this).scrollTop();
        var docViewBottom = docViewTop + $(this).height();
@@ -667,6 +667,32 @@ NEWSBLUR.AssetModel.Reader.prototype = {
        });
    },

    move_feed_to_folder: function(feed_id, in_folder, to_folder, callback) {
        var pre_callback = _.bind(function(data) {
            this.folders = data.folders;
            return callback();
        }, this);

        this.make_request('/reader/move_feed_to_folder', {
            'feed_id': feed_id,
            'in_folder': in_folder,
            'to_folder': to_folder
        }, pre_callback);
    },

    move_folder_to_folder: function(folder_name, in_folder, to_folder, callback) {
        var pre_callback = _.bind(function(data) {
            this.folders = data.folders;
            return callback();
        }, this);

        this.make_request('/reader/move_folder_to_folder', {
            'folder_name': folder_name,
            'in_folder': in_folder,
            'to_folder': to_folder
        }, pre_callback);
    },

    preference: function(preference, value, callback) {
        if (typeof value == 'undefined') {
            var pref = NEWSBLUR.Preferences[preference];
@@ -1,5 +1,5 @@
(function($) {

(function($) {
    NEWSBLUR.Reader = function() {
        var self = this;

@@ -1051,6 +1051,7 @@
        this.add_url_from_querystring();
        _.defer(_.bind(function() {
            this.model.load_feed_favicons($.rescope(this.make_feed_favicons, this), this.flags['favicons_downloaded'], this.flags['has_chosen_feeds']);
            this.setup_socket_realtime_unread_counts();
        }, this));
    },

@@ -2104,6 +2105,7 @@
                // So just assume story not found.
                this.switch_to_correct_view(false);
                feed_position = this.scroll_to_story_in_story_feed(story, $feed_story);
                this.show_stories_preference_in_feed_view(true);
            } else {
                iframe_position = this.scroll_to_story_in_iframe(story, $iframe_story);
                this.switch_to_correct_view(iframe_position);
@@ -2663,6 +2665,38 @@
        this.mark_story_as_read(story_id);
    },

    send_story_to_pinboard: function(story_id) {
        var story = this.model.get_story(story_id);
        var url = 'http://pinboard.in/add/?';
        var pinboard_url = [
            url,
            'url=',
            encodeURIComponent(story.story_permalink),
            '&title=',
            encodeURIComponent(story.story_title),
            '&tags=',
            encodeURIComponent(story.story_tags.join(', '))
        ].join('');
        window.open(pinboard_url, '_blank');
        this.mark_story_as_read(story_id);
    },

    send_story_to_googleplus: function(story_id) {
        var story = this.model.get_story(story_id);
        var url = 'https://plusone.google.com/_/+1/confirm'; //?hl=en&url=${url}
        var googleplus_url = [
            url,
            '?hl=en&url=',
            encodeURIComponent(story.story_permalink),
            '&title=',
            encodeURIComponent(story.story_title),
            '&tags=',
            encodeURIComponent(story.story_tags.join(', '))
        ].join('');
        window.open(googleplus_url, '_blank');
        this.mark_story_as_read(story_id);
    },

    send_story_to_email: function(story_id) {
        NEWSBLUR.reader_send_email = new NEWSBLUR.ReaderSendEmail(story_id);
        this.mark_story_as_read(story_id);
@@ -3399,7 +3433,7 @@
                    $.make('span', { className: 'NB-feed-story-starred-date' }, story.starred_date))
                ])
            ]),
            $.make('div', { className: 'NB-feed-story-content' }, story.story_content)
            $.make('div', { className: 'NB-feed-story-content' }, this.make_story_content(story.story_content))
        ]).data('story', story.id).data('story_id', story.id).data('feed_id', story.story_feed_id);

        if (story_has_modifications && this.model.preference('show_tooltips')) {

@@ -3455,6 +3489,11 @@
        if (first_load) this.show_stories_preference_in_feed_view(true);
    },

    make_story_content: function(story_content) {
        var $story_content = $('<div>').html(story_content).autolink();
        return $story_content;
    },

    make_story_feed_title: function(story) {
        var title = story.story_title;
        var feed_titles = this.model.classifiers[story.story_feed_id] &&

@@ -4172,6 +4211,10 @@
                $.make('div', { className: 'NB-menu-manage-image' }),
                $.make('div', { className: 'NB-menu-manage-title' }, starred_title)
            ]),
            (story.read_status && $.make('li', { className: 'NB-menu-manage-story-unread' }, [
                $.make('div', { className: 'NB-menu-manage-image' }),
                $.make('div', { className: 'NB-menu-manage-title' }, 'Mark as unread')
            ])),
            $.make('li', { className: 'NB-menu-manage-story-thirdparty' }, [
                (NEWSBLUR.Preferences['story_share_facebook'] && $.make('div', { className: 'NB-menu-manage-thirdparty-icon NB-menu-manage-thirdparty-facebook'}).bind('mouseenter', _.bind(function(e) {
                    $(e.target).siblings('.NB-menu-manage-title').text('Send to Facebook').parent().addClass('NB-menu-manage-highlight-facebook');
@@ -4188,6 +4231,16 @@
                }, this)).bind('mouseleave', _.bind(function(e) {
                    $(e.target).siblings('.NB-menu-manage-title').text('Send to Instapaper').parent().removeClass('NB-menu-manage-highlight-readitlater');
                }, this))),
                (NEWSBLUR.Preferences['story_share_pinboard'] && $.make('div', { className: 'NB-menu-manage-thirdparty-icon NB-menu-manage-thirdparty-pinboard'}).bind('mouseenter', _.bind(function(e) {
                    $(e.target).siblings('.NB-menu-manage-title').text('Pinboard').parent().addClass('NB-menu-manage-highlight-pinboard');
                }, this)).bind('mouseleave', _.bind(function(e) {
                    $(e.target).siblings('.NB-menu-manage-title').text('Send to Instapaper').parent().removeClass('NB-menu-manage-highlight-pinboard');
                }, this))),
                (NEWSBLUR.Preferences['story_share_googleplus'] && $.make('div', { className: 'NB-menu-manage-thirdparty-icon NB-menu-manage-thirdparty-googleplus'}).bind('mouseenter', _.bind(function(e) {
                    $(e.target).siblings('.NB-menu-manage-title').text('Google+').parent().addClass('NB-menu-manage-highlight-googleplus');
                }, this)).bind('mouseleave', _.bind(function(e) {
                    $(e.target).siblings('.NB-menu-manage-title').text('Send to Instapaper').parent().removeClass('NB-menu-manage-highlight-googleplus');
                }, this))),
                (NEWSBLUR.Preferences['story_share_email'] && $.make('div', { className: 'NB-menu-manage-thirdparty-icon NB-menu-manage-thirdparty-email'}).bind('mouseenter', _.bind(function(e) {
                    $(e.target).siblings('.NB-menu-manage-title').text('Send to email').parent().addClass('NB-menu-manage-highlight-email');
                }, this)).bind('mouseleave', _.bind(function(e) {

@@ -4212,6 +4265,10 @@
                this.send_story_to_readitlater(story.id);
            } else if ($target.hasClass('NB-menu-manage-thirdparty-readability')) {
                this.send_story_to_readability(story.id);
            } else if ($target.hasClass('NB-menu-manage-thirdparty-pinboard')) {
                this.send_story_to_pinboard(story.id);
            } else if ($target.hasClass('NB-menu-manage-thirdparty-googleplus')) {
                this.send_story_to_googleplus(story.id);
            } else if ($target.hasClass('NB-menu-manage-thirdparty-email')) {
                this.send_story_to_email(story.id);
            } else {

@@ -4224,11 +4281,6 @@
                $.make('div', { className: 'NB-menu-manage-title' }, 'Intelligence trainer'),
                $.make('div', { className: 'NB-menu-manage-subtitle' }, 'What you like and dislike.')
            ])
            // (story.read_status && $.make('li', { className: 'NB-menu-separator' })),
            // (story.read_status && $.make('li', { className: 'NB-menu-manage-story-unread' }, [
            //     $.make('div', { className: 'NB-menu-manage-image' }),
            //     $.make('div', { className: 'NB-menu-manage-title' }, 'Mark as unread')
            // ]))
        ]);
        $manage_menu.data('feed_id', feed_id);
        $manage_menu.data('story_id', story_id);

@@ -4403,7 +4455,7 @@
            $('.NB-task-manage').tipsy('enable');
        }

        $item.removeClass('NB-showing-menu');
        if ($item) $item.removeClass('NB-showing-menu');

        if (animate) {
            $manage_menu_container.stop().animate({
@@ -4569,15 +4621,26 @@
        $feed = $feed || this.find_feed_in_feed_list(feed_id);
        var $parent = $feed.closest('li.folder');
        var in_folder = '';
        var new_folder = $('.NB-menu-manage-feed-move-confirm select').val();

        if (new_folder.length <= 0) return this.hide_confirm_move_menu_item();
        var to_folder = $('.NB-menu-manage-feed-move-confirm select').val();

        if ($parent.length) {
            in_folder = $feed.eq(0).closest('li.folder').find('.folder_title_text').eq(0).text();
        }

        if (to_folder == in_folder) return this.hide_confirm_move_menu_item();

        this.model.move_feed_to_folder(feed_id, new_folder, function() {});
        this.model.move_feed_to_folder(feed_id, in_folder, to_folder, _.bind(function() {
            _.delay(_.bind(function() {
                this.$s.$feed_list.css('opacity', 1).animate({'opacity': 0}, {
                    'duration': 100,
                    'complete': _.bind(function() {
                        this.make_feeds();
                    }, this)
                });
            }, this), 250);

            this.hide_manage_menu('feed', $feed, true);
        }, this));

        this.hide_confirm_move_menu_item(true);
    },

@@ -4585,16 +4648,36 @@
    manage_menu_move_folder: function(folder, $folder) {
        var self = this;
        var in_folder = '';
        var $parent = $folder.closest('li.folder');
        var new_folder = $('.NB-menu-manage-folder-move-confirm select').val();
        var $parent = $folder.parents('li.folder').eq(0);
        var to_folder = $('.NB-menu-manage-folder-move-confirm select').val();
        var folder_name = $folder.find('.folder_title_text').eq(0).text();
        var child_folders = $folder.find('.folder_title_text').map(function() {
            return $(this).text();
        }).get();

        if (new_folder.length <= 0) return this.hide_confirm_move_menu_item();

        if ($parent.length) {
            in_folder = $parent.find('.folder_title_text').eq(0).text();
        }

        this.model.move_folder_to_folder(folder, new_folder, in_folder, function() {});
        if (to_folder == in_folder ||
            to_folder == folder_name ||
            _.contains(child_folders, to_folder)) {
            return this.hide_confirm_move_menu_item();
        }

        this.model.move_folder_to_folder(folder, in_folder, to_folder, _.bind(function() {
            _.delay(_.bind(function() {
                this.$s.$feed_list.css('opacity', 1).animate({'opacity': 0}, {
                    'duration': 100,
                    'complete': _.bind(function() {
                        this.make_feeds();
                    }, this)
                });
            }, this), 250);

            this.hide_manage_menu('folder', $parent, true);
        }, this));

        this.hide_confirm_move_menu_item(true);
    },

@@ -4684,7 +4767,6 @@

        this.model.rename_folder(folder, new_folder_name, in_folder, function() {
        });
        NEWSBLUR.log(['rename', $folder, new_folder_name]);
        $('.folder_title_text', $folder).text(new_folder_name);
        this.hide_confirm_rename_menu_item(true);

@@ -4860,7 +4942,8 @@
            }
        }

        if (this.story_view == 'feed' && this.model.preference('feed_view_single_story')) {
        if ((this.story_view == 'feed' || this.flags.page_view_showing_feed_view) &&
            this.model.preference('feed_view_single_story')) {
            // No need to show/hide feed view stories under single_story preference.
            // If the user switches to feed/page, then no animation is happening
            // and this will work anyway.

@@ -4930,6 +5013,25 @@
        this.model.save_exception_retry(feed_id, _.bind(this.force_feed_refresh, this, feed_id, $feed));
    },

    setup_socket_realtime_unread_counts: function(force) {
        if (force && !this.socket) {
            this.socket = this.socket || io.connect('http://' + window.location.hostname + ':8888');

            // this.socket.refresh_feeds = _.debounce(_.bind(this.force_feeds_refresh, this), 1000*10);

            this.socket.on('connect', _.bind(function() {
                this.socket.emit('subscribe:feeds', _.keys(this.model.feeds));
                this.socket.on('feed:update', _.bind(function(feed_id, message) {
                    console.log(['Feed update', feed_id, message]);
                    this.force_feeds_refresh(false, false, parseInt(feed_id, 10));
                }, this));

                this.flags.feed_refreshing_in_realtime = true;
                this.setup_feed_refresh();
            }, this));
        }
    },

    setup_feed_refresh: function(new_feeds) {
        var self = this;
        var refresh_interval = this.constants.FEED_REFRESH_INTERVAL;
@@ -4944,6 +5046,9 @@
        if (feed_count > 500) {
            refresh_interval *= 1.5;
        }
        if (this.flags.feed_refreshing_in_realtime) {
            refresh_interval *= 20;
        }

        if (new_feeds && feed_count < 250) {
            refresh_interval = (1000 * 60) * 1/10;

@@ -5895,7 +6000,7 @@
            e.stopPropagation();
            var folder_name = $t.parents('.NB-menu-manage').data('folder_name');
            var $folder = $t.parents('.NB-menu-manage').data('$folder');
            self.manage_menu_rename_folder(folder_name, $folder);
            self.manage_menu_move_folder(folder_name, $folder);
        });
        $.targetIs(e, { tagSelector: '.NB-menu-manage-feed-move-save' }, function($t, $p){
            e.preventDefault();

@@ -6473,11 +6578,11 @@
        });
        $document.bind('keydown', 'space', function(e) {
            e.preventDefault();
            self.page_in_story(0.2, 1);
            self.page_in_story(0.4, 1);
        });
        $document.bind('keydown', 'shift+space', function(e) {
            e.preventDefault();
            self.page_in_story(0.4, -1);
            self.page_in_story(0.6, -1);
        });
        $document.bind('keydown', 'u', function(e) {
            e.preventDefault();

@@ -6543,6 +6648,10 @@
                self.mark_feed_as_read();
            }
        });
        $document.bind('keydown', 'shift+e', function(e) {
            e.preventDefault();
            self.open_river_stories();
        });
    }

};
@@ -129,6 +129,7 @@ NEWSBLUR.ReaderFeedchooser.prototype = {
        var $paypal = $('.NB-feedchooser-paypal', this.$modal);
        $.get('/profile/paypal_form', function(response) {
            $paypal.html(response);
            self.choose_dollar_amount(2);
        });
    },

@@ -400,6 +400,14 @@ _.extend(NEWSBLUR.ReaderPreferences.prototype, {
            $.make('input', { type: 'checkbox', id: 'NB-preference-story-share-instapaper', name: 'story_share_instapaper' }),
            $.make('label', { 'for': 'NB-preference-story-share-instapaper' })
        ]),
        $.make('div', { className: 'NB-preference-option', title: 'Pinboard.in' }, [
            $.make('input', { type: 'checkbox', id: 'NB-preference-story-share-pinboard', name: 'story_share_pinboard' }),
            $.make('label', { 'for': 'NB-preference-story-share-pinboard' })
        ]),
        $.make('div', { className: 'NB-preference-option', title: 'Google+' }, [
            $.make('input', { type: 'checkbox', id: 'NB-preference-story-share-googleplus', name: 'story_share_googleplus' }),
            $.make('label', { 'for': 'NB-preference-story-share-googleplus' })
        ]),
        $.make('div', { className: 'NB-preference-option', title: 'Read It Later' }, [
            $.make('input', { type: 'checkbox', id: 'NB-preference-story-share-readitlater', name: 'story_share_readitlater' }),
            $.make('label', { 'for': 'NB-preference-story-share-readitlater' })
@@ -99,7 +99,7 @@ NEWSBLUR.utils = {
        var $option = $.make('option', { value: '' }, "Top Level");
        $options.append($option);

        $options = this.make_folder_options($options, folders, '-');
        $options = this.make_folder_options($options, folders, ' ');

        return $options;
    },

@@ -112,7 +112,7 @@ NEWSBLUR.utils = {
                var folder = item[o];
                var $option = $.make('option', { value: o }, depth + ' ' + o);
                $options.append($option);
                $options = this.make_folder_options($options, folder, depth+'-');
                $options = this.make_folder_options($options, folder, depth+' ');
            }
        }
    }
3750  media/js/socket.io-client.0.8.7.js  (new file; diff suppressed because one or more lines are too long)
21  node/unread_counts.cs  (new file)
@@ -0,0 +1,21 @@
fs = require 'fs'
io = require('socket.io').listen 8888
redis = require 'redis'
client = redis.createClient()

io.sockets.on 'connection', (socket) ->

    socket.on 'subscribe:feeds', (feeds) ->
        socket.subscribe = redis.createClient()

        console.log "Subscribing to #{feeds.length} feeds"
        socket.subscribe.subscribe feeds

        socket.subscribe.on 'message', (channel, message) ->
            console.log "Update on #{channel}: #{message}"
            socket.emit 'feed:update', channel

    socket.on 'disconnect', () ->
        socket.subscribe?.end()
        console.log 'Disconnect'
29  node/unread_counts.js  (new file)
@@ -0,0 +1,29 @@
(function() {
  var client, fs, io, redis;

  fs = require('fs');

  io = require('socket.io').listen(8888);

  redis = require('redis');

  client = redis.createClient();

  io.sockets.on('connection', function(socket) {
    socket.on('subscribe:feeds', function(feeds) {
      socket.subscribe = redis.createClient();
      console.log("Subscribing to " + feeds.length + " feeds");
      socket.subscribe.subscribe(feeds);
      return socket.subscribe.on('message', function(channel, message) {
        console.log("Update on " + channel + ": " + message);
        return socket.emit('feed:update', channel);
      });
    });
    return socket.on('disconnect', function() {
      var _ref;
      if ((_ref = socket.subscribe) != null) _ref.end();
      return console.log('Disconnect');
    });
  });

}).call(this);
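A quick way to exercise the relay above, assuming Redis runs locally and redis-py is installed, is to publish into a feed-id channel from Python; the 'story:new' payload mirrors what feed_fetcher.py publishes later in this diff, and feed id 12 is only an example:

# Hypothetical smoke test for node/unread_counts.js; not part of the commit.
import redis

r = redis.Redis(host='localhost', port=6379, db=0)

# Channel names are feed ids. A browser client that emitted subscribe:feeds
# for feed 12 should receive a 'feed:update' event when this runs.
listeners = r.publish('12', 'story:new')
print("Delivered to %s subscriber(s)" % listeners)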
26  settings.py
@@ -2,6 +2,7 @@ import sys
import logging
import os
from mongoengine import connect
import redis

# ===========================
# = Directory Declaractions =

@@ -182,6 +183,7 @@ COMPRESS_JS = {
        'js/jquery.fieldselection.js',
        'js/jquery.flot.js',
        'js/jquery.tipsy.js',
        # 'js/socket.io-client.0.8.7.js',
        'js/underscore.js',
        'js/underscore.string.js',
        'js/newsblur/reader_utils.js',

@@ -420,7 +422,23 @@ class MasterSlaveRouter(object):
    def allow_syncdb(self, db, model):
        "Explicitly put all models on all databases."
        return True


# =========
# = Redis =
# =========

REDIS = {
    'host': 'db02',
}

# ===========
# = MongoDB =
# ===========

MONGODB_SLAVE = {
    'host': 'db01'
}

# ==================
# = Configurations =
# ==================
@@ -455,3 +473,9 @@ MONGO_DB_DEFAULTS = {
}
MONGO_DB = dict(MONGO_DB_DEFAULTS, **MONGO_DB)
MONGODB = connect(MONGO_DB.pop('name'), **MONGO_DB)

# =========
# = Redis =
# =========

REDIS_POOL = redis.ConnectionPool(host=REDIS['host'], port=6379, db=0)
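The pool above is what the rest of this commit consumes (feed_fetcher.py below does exactly this); a minimal sketch, with the feed id and message only as placeholders:

import redis
from django.conf import settings

# Reuse the shared connection pool rather than opening ad-hoc connections.
r = redis.Redis(connection_pool=settings.REDIS_POOL)
r.publish(str(42), 'story:new')  # 42 stands in for a real feed id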
@@ -377,7 +377,7 @@ $(document).ready(function() {
                <div class="NB-module-item-title">
                    <span class="NB-raquo">»</span>
                    <!-- <a href="#" class="NB-splash-link">Download NewsBlur on the App Store</a> -->
                    <span class="NB-module-mobile-freeforpremium">Approved, but not Good Enough. Working on v1.1.</span>
                    <span class="NB-module-mobile-freeforpremium">Approved, but not Good Enough. Working on v1.2.</span>
                </div>
            </div>
            <div class="NB-module-item NB-last {% if user_profile.hide_mobile %}NB-hidden{% endif %}">
@@ -84,6 +84,15 @@
      optional: true
      default: "false"
      example: "true/false"
    - key: update_counts
      desc: >
        Forces recalculation of unread counts on all feeds. The preferred method is
        to call this endpoint without updated counts, then call refresh_feeds to get
        updated counts. That way you can quickly show the user's feeds, then update
        the counts. Turning this option on will lead to a slower load-time.
      optional: true
      default: "false"
      example: "true/false"

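A sketch of the flow the update_counts note recommends: load /reader/feeds quickly, then fetch fresh counts from /reader/refresh_feeds instead of passing update_counts=true. The requests library, host, and session cookie here are assumptions for illustration only:

import requests

session = requests.Session()
session.cookies.set('newsblur_sessionid', '<session id>')  # placeholder auth

# Fast: feed list with possibly stale counts (update_counts defaults to false).
feeds = session.get('https://www.newsblur.com/reader/feeds').json()

# Then fetch updated counts separately so the feed list can render immediately.
counts = session.get('https://www.newsblur.com/reader/refresh_feeds').json()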
- url: /reader/favicons
  method: GET
@@ -228,6 +237,37 @@
      required: true
      example: "42"

- url: /reader/mark_feed_stories_as_read
  method: POST
  short_desc: "Mark stories from multiple feeds as read."
  long_desc:
    - "Marks multiple stories as read."
    - "Multiple story ids can be sent at once."
    - "Multiple feeds can be sent."
  tips:
    - "Throttle requests to this endpoint. You don't need to send one request per story."
    - "Queue up to 5 stories or wait 10 seconds before firing, whichever comes first."
  params:
    - key: feeds_stories
      desc: "Dictionary of feed_ids to an array of story_ids."
      required: true
      example: "{<br>12: ['story_id_1', 'story_id_2'],<br>24: ['story_id_3']<br>}"

- url: /reader/mark_story_as_unread
  method: POST
  short_desc: "Mark a story as unread."
  long_desc:
    - "Mark a story as unread."
  params:
    - key: story_id
      desc: "Story id to mark unread."
      required: true
      example: "http://www.ofbrooklyn.com/story-title"
    - key: feed_id
      desc: "Feed id that the story is from."
      required: true
      example: "42"
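An illustrative client-side batch for mark_feed_stories_as_read, following the throttling tips above. The requests library is assumed, and encoding feeds_stories as a JSON string is an assumption based on the example in the docs:

import json
import requests

session = requests.Session()
session.cookies.set('newsblur_sessionid', '<session id>')  # placeholder auth

# Queue story ids per feed, then flush every 5 stories or every 10 seconds.
queued = {12: ['story_id_1', 'story_id_2'], 24: ['story_id_3']}

session.post('https://www.newsblur.com/reader/mark_feed_stories_as_read',
             data={'feeds_stories': json.dumps(queued)})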

- url: /reader/mark_story_as_starred
  method: POST
  short_desc: "Mark a story as starred (saved)."
@@ -245,17 +285,17 @@

- url: /reader/mark_feed_as_read
  method: POST
  short_desc: "Mark all stories in a feed as read."
  short_desc: "Mark a list of feeds as read."
  long_desc:
    - "Mark all stories in a feed or list of feeds as read."
    - "Mark a list of feeds as read."
  params:
    - key: feed_id
      desc: "List of feed ids to mark as read."
      required: true
      example: "[12, 24, 36]"
  tips:
    - "To mark a folder as read, send the ids of each feed inside the folder."
  params:
    - key: feed_ids
      desc: "List of feed ids to mark as read."
      optional: true
      example: "[12, 24, 36]"


- url: /reader/mark_all_as_read
  method: POST
  short_desc: "Mark all stories from all feeds as read."
@@ -303,6 +343,52 @@
      default: "[Top Level]"
      example: "All Blogs"

- url: /reader/move_feed_to_folder
  method: POST
  short_desc: "Move a feed into a different folder."
  long_desc:
    - "Move a feed into a different folder."
  params:
    - key: feed_id
      desc: "Feed id."
      required: true
      example: 12
    - key: in_folder
      desc: >
        Current folder the feed is in. Necessary to disambiguate if a feed is in
        multiple folders.
      required: true
      example: "Blogs"
    - key: to_folder
      desc: "Folder the feed is going into."
      required: true
      example: "Tumblrs"
  tips:
    - "Leave folder names blank to specify Top Level."

- url: /reader/move_folder_to_folder
  method: POST
  short_desc: "Move a folder into a different folder."
  long_desc:
    - "Move a folder into a different folder."
  params:
    - key: folder_name
      desc: "Name of folder being moved."
      required: true
      example: "Tumblrs"
    - key: in_folder
      desc: >
        Current folder the folder is in. Necessary to disambiguate if a folder
        name is in multiple folders. (Please don't let this happen.)
      required: true
      example: "Blogs"
    - key: to_folder
      desc: "New folder the existing folder is going into."
      required: true
      example: "Daily Blogs"
  tips:
    - "Leave folder names blank to specify Top Level."
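A hypothetical call against the endpoint documented above, mirroring the parameters the web client's move_feed_to_folder() sends; the requests library and session cookie are assumptions, and the folder names come from the doc examples:

import requests

session = requests.Session()
session.cookies.set('newsblur_sessionid', '<session id>')  # placeholder auth

session.post('https://www.newsblur.com/reader/move_feed_to_folder', data={
    'feed_id': 12,           # example feed id from the docs
    'in_folder': 'Blogs',    # folder the feed currently lives in
    'to_folder': 'Tumblrs',  # destination folder; blank means Top Level
})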

- url: /reader/rename_feed
  method: POST
  short_desc: "Rename a feed title."
@@ -374,17 +460,7 @@
      desc: "List of feed ids in the folder that's being deleted. These feeds also get removed."
      optional: true
      example: "[12, 24, 36]"

- url: /reader/mark_feed_as_read
  method: POST
  short_desc: "Mark a list of feeds as read."
  long_desc:
    - "Mark a list of feeds as read."
  params:
    - key: feed_id
      desc: "List of feed ids to mark as read."
      required: true
      example: "[12, 24, 36]"


- url: /reader/save_feed_order
  method: POST
@@ -11,12 +11,14 @@ from utils import feedparser
from utils.story_functions import pre_process_story
from utils import log as logging
from utils.feed_functions import timelimit, TimeoutError, mail_feed_error_to_admin, utf8encode
from utils.story_functions import bunch
import time
import datetime
import traceback
import multiprocessing
import urllib2
import xml.sax
import redis

# Refresh feed code adapted from Feedjack.
# http://feedjack.googlecode.com
@@ -214,11 +216,24 @@ class ProcessFeed:
            # if story.get('published') > end_date:
            #     end_date = story.get('published')
            story_guids.append(story.get('guid') or story.get('link'))
        existing_stories = list(MStory.objects(
            # story_guid__in=story_guids,
            story_date__gte=start_date,
            story_feed_id=self.feed.pk
        ).limit(len(story_guids)))

        if self.options['slave_db']:
            slave_db = self.options['slave_db']
            stories_db_orig = slave_db.stories.find({
                "story_feed_id": self.feed.pk,
                "story_date": {
                    "$gte": start_date,
                },
            }).limit(len(story_guids))
            existing_stories = []
            for story in stories_db_orig:
                existing_stories.append(bunch(story))
        else:
            existing_stories = list(MStory.objects(
                # story_guid__in=story_guids,
                story_date__gte=start_date,
                story_feed_id=self.feed.pk
            ).limit(len(story_guids)))

        # MStory.objects(
        #     (Q(story_date__gte=start_date) & Q(story_date__lte=end_date))
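The bunch helper imported above is not shown in this diff; it presumably wraps a raw pymongo dict so slave-db rows can be read like MStory documents. A stand-in illustrating the idea, not NewsBlur's actual implementation:

class Bunch(dict):
    """Dict whose keys are also readable as attributes (illustrative only)."""
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

story = Bunch({'story_guid': 'http://example.com/post', 'story_feed_id': 12})
print(story.story_guid)  # same value as story['story_guid']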
@@ -227,10 +242,9 @@ class ProcessFeed:
        # ).order_by('-story_date')
        ret_values = self.feed.add_update_stories(self.fpf.entries, existing_stories)

        logging.debug(u' ---> [%-30s] Parsed Feed: %s' % (
        logging.debug(u' ---> [%-30s] ~FYParsed Feed: new~FG=~FG~SB%s~SN~FY up~FG=~FY~SB%s~SN same~FG=~FY%s err~FG=~FR~SB%s' % (
            unicode(self.feed)[:30],
            u' '.join(u'%s=%d' % (self.entry_trans[key],
                                  ret_values[key]) for key in self.entry_keys),))
            ret_values[ENTRY_NEW], ret_values[ENTRY_UPDATED], ret_values[ENTRY_SAME], ret_values[ENTRY_ERR]))
        self.feed.update_all_statistics()
        self.feed.trim_feed()
        self.feed.save_feed_history(200, "OK")
@@ -379,6 +393,9 @@ class Dispatcher:
                except IntegrityError:
                    logging.debug(" ---> [%-30s] IntegrityError on feed: %s" % (unicode(feed)[:30], feed.feed_address,))

                if ret_entries[ENTRY_NEW]:
                    self.publish_to_subscribers(feed)

            done_msg = (u'%2s ---> [%-30s] Processed in %s (%s) [%s]' % (
                identity, feed.feed_title[:30], unicode(delta),
                feed.pk, self.feed_trans[ret_feed],))
@@ -390,6 +407,15 @@ class Dispatcher:

        # time_taken = datetime.datetime.utcnow() - self.time_start

    def publish_to_subscribers(self, feed):
        try:
            r = redis.Redis(connection_pool=settings.REDIS_POOL)
            listeners_count = r.publish(str(feed.pk), 'story:new')
            if listeners_count:
                logging.debug(" ---> [%-30s] Published to %s subscribers" % (unicode(feed)[:30], listeners_count))
        except redis.ConnectionError:
            logging.debug(" ***> [%-30s] Redis is unavailable for real-time." % (unicode(feed)[:30],))

    @timelimit(20)
    def count_unreads_for_subscribers(self, feed):
        UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
@@ -401,8 +427,21 @@ class Dispatcher:
            unicode(feed)[:30], user_subs.count(),
            feed.num_subscribers, feed.active_subscribers, feed.premium_subscribers))

        stories_db = MStory.objects(story_feed_id=feed.pk,
                                    story_date__gte=UNREAD_CUTOFF)
        if self.options['slave_db']:
            slave_db = self.options['slave_db']

            stories_db_orig = slave_db.stories.find({
                "story_feed_id": feed.pk,
                "story_date": {
                    "$gte": UNREAD_CUTOFF,
                },
            })
            stories_db = []
            for story in stories_db_orig:
                stories_db.append(bunch(story))
        else:
            stories_db = MStory.objects(story_feed_id=feed.pk,
                                        story_date__gte=UNREAD_CUTOFF)
        for sub in user_subs:
            cache.delete('usersub:%s' % sub.user_id)
            sub.needs_unread_recalc = True
106  utils/ratelimit.py  (new file)
@@ -0,0 +1,106 @@
from django.http import HttpResponseForbidden
from django.core.cache import cache
from datetime import datetime, timedelta
import functools, sha

class ratelimit(object):
    "Instances of this class can be used as decorators"
    # This class is designed to be sub-classed
    minutes = 1  # The time period
    requests = 4  # Number of allowed requests in that time period

    prefix = 'rl-'  # Prefix for memcache key

    def __init__(self, **options):
        for key, value in options.items():
            setattr(self, key, value)

    def __call__(self, fn):
        def wrapper(request, *args, **kwargs):
            return self.view_wrapper(request, fn, *args, **kwargs)
        functools.update_wrapper(wrapper, fn)
        return wrapper

    def view_wrapper(self, request, fn, *args, **kwargs):
        if not self.should_ratelimit(request):
            return fn(request, *args, **kwargs)

        counts = self.get_counters(request).values()

        # Increment rate limiting counter
        self.cache_incr(self.current_key(request))

        # Have they failed?
        if sum(counts) >= self.requests:
            return self.disallowed(request)

        return fn(request, *args, **kwargs)

    def cache_get_many(self, keys):
        return cache.get_many(keys)

    def cache_incr(self, key):
        # memcache is only backend that can increment atomically
        try:
            # add first, to ensure the key exists
            cache.add(key, 0, self.expire_after())
            cache.incr(key)
        except AttributeError:
            cache.set(key, cache.get(key, 0) + 1, self.expire_after())

    def should_ratelimit(self, request):
        return True

    def get_counters(self, request):
        return self.cache_get_many(self.keys_to_check(request))

    def keys_to_check(self, request):
        extra = self.key_extra(request)
        now = datetime.now()
        return [
            '%s%s-%s' % (
                self.prefix,
                extra,
                (now - timedelta(minutes = minute)).strftime('%Y%m%d%H%M')
            ) for minute in range(self.minutes + 1)
        ]

    def current_key(self, request):
        return '%s%s-%s' % (
            self.prefix,
            self.key_extra(request),
            datetime.now().strftime('%Y%m%d%H%M')
        )

    def key_extra(self, request):
        key = getattr(request.session, 'session_key', '')
        if not key:
            key = request.META.get('HTTP_X_FORWARDED_FOR', '').split(',')[0]
        if not key:
            key = request.COOKIES.get('newsblur_sessionid', '')
        if not key:
            key = request.META.get('HTTP_USER_AGENT', '')
        return key

    def disallowed(self, request):
        "Over-ride this method if you want to log incidents"
        return HttpResponseForbidden('Rate limit exceeded')

    def expire_after(self):
        "Used for setting the memcached cache expiry"
        return (self.minutes + 1) * 60

class ratelimit_post(ratelimit):
    "Rate limit POSTs - can be used to protect a login form"
    key_field = None  # If provided, this POST var will affect the rate limit

    def should_ratelimit(self, request):
        return request.method == 'POST'

    def key_extra(self, request):
        # IP address and key_field (if it is set)
        extra = super(ratelimit_post, self).key_extra(request)
        if self.key_field:
            value = sha.new(request.POST.get(self.key_field, '')).hexdigest()
            extra += '-' + value
        return extra
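A hypothetical use of the decorator above on a Django view; the limits shown are only examples, not values taken from this commit:

from django.http import HttpResponse
from utils.ratelimit import ratelimit

@ratelimit(minutes=1, requests=10)  # example limits; counts requests per session/IP key
def load_feeds(request):
    # ... build and return the feeds payload ...
    return HttpResponse("{}")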