Moving river blurblog to no longer use MUserStory.

Samuel Clay 2013-05-03 14:46:44 -07:00
parent 882a307026
commit 1a04bcd400
3 changed files with 41 additions and 37 deletions
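
The change: load_river_blurblog previously computed each story's read state from MUserStory rows plus an UNREAD_CUTOFF date check. MSocialSubscription.feed_stories now returns a third value, a dict mapping each followed user's id to that subscription's unread story hashes (built with Redis set arithmetic and memoized in the Django cache for 24 hours), and the view marks a story unread when its hash appears in any of those sets.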

apps/reader/models.py

@@ -188,7 +188,8 @@ class UserSubscription(models.Model):
             story_hashes = us.get_stories(offset=0, limit=200,
                                           order=order, read_filter=read_filter,
                                           withscores=True)
-            unread_feed_story_hashes[feed_id] = us.get_stories(read_filter='unread', limit=500, fetch_stories=False)
+            unread_feed_story_hashes[feed_id] = us.get_stories(read_filter='unread', limit=500,
+                                                               fetch_stories=False)
             if story_hashes:
                 r.zadd(ranked_stories_keys, **dict(story_hashes))
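
The second get_stories() call with fetch_stories=False asks only for up to 500 unread story hashes per feed, so the caller can test read state by set membership instead of querying MUserStory per story. A minimal sketch of the resulting lookup, with made-up feed ids and hashes (is_unread is a hypothetical helper, not in the codebase):

# Shape of the per-feed unread lookup (hypothetical data; each value is
# the raw hash list get_stories() returns when fetch_stories=False).
unread_feed_story_hashes = {
    42: ['42:6ba4ac', '42:f0e9d8'],
    51: ['51:9b8c7d'],
}

def is_unread(story_hash, feed_id):
    # Membership test that replaces a per-story MUserStory query.
    return story_hash in unread_feed_story_hashes.get(feed_id, [])

assert is_unread('42:f0e9d8', 42)
assert not is_unread('51:000000', 51)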

apps/social/models.py

@@ -18,6 +18,7 @@ from django.core.urlresolvers import reverse
 from django.template.loader import render_to_string
 from django.template.defaultfilters import slugify
 from django.core.mail import EmailMultiAlternatives
+from django.core.cache import cache
 from apps.reader.models import UserSubscription, MUserStory
 from apps.analyzer.models import MClassifierFeed, MClassifierAuthor, MClassifierTag, MClassifierTitle
 from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds, apply_classifier_authors, apply_classifier_tags
@@ -791,7 +792,7 @@ class MSocialSubscription(mongo.Document):
         }

     def get_stories(self, offset=0, limit=6, order='newest', read_filter='all',
-                    withscores=False, everything_unread=False):
+                    withscores=False, everything_unread=False, fetch_stories=True):
         r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
         ignore_user_stories = False
@@ -808,7 +809,8 @@ class MSocialSubscription(mongo.Document):
             r.sdiffstore(unread_stories_key, stories_key, read_stories_key)
             sorted_stories_key = 'zB:%s' % (self.subscription_user_id)
-            unread_ranked_stories_key = 'zUB:%s:%s' % (self.user_id, self.subscription_user_id)
+            unread_ranked_stories_key = 'z%sUB:%s:%s' % ('f' if fetch_stories else '',
+                                                         self.user_id, self.subscription_user_id)
             r.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])

         current_time = int(time.time() + 60*60*24)
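
The scratch key now gets a 'zf' prefix when fetch_stories is true and plain 'z' otherwise, so the two variants never collide, and zinterstore intersects the date-ranked blurblog zset with the plain unread set. A redis-py sketch of the pattern under stated assumptions: a local Redis, the mapping form of zadd from redis-py 3+, and made-up B:/RS: key names and values; only the zB:/UB:/zfUB: naming comes from the hunk.

import redis

r = redis.Redis()  # assumes a local Redis; all data below is made up

# Shared stories for user 12, and which of them user 34 has read.
r.sadd('B:12', '1:aaaaaa', '1:bbbbbb', '1:cccccc')
r.sadd('RS:34:B', '1:bbbbbb')

# Unread = shared - read (the sdiffstore step above).
r.sdiffstore('UB:34:12', 'B:12', 'RS:34:B')

# zB:12 ranks the shared stories by share date (scores are timestamps).
r.zadd('zB:12', {'1:aaaaaa': 1367500000, '1:bbbbbb': 1367500100,
                 '1:cccccc': 1367500200})

# Intersect ranked stories with the unread set; the 'f' prefix keeps the
# fetch_stories variant from clobbering the hash-only variant's key.
r.zinterstore('zfUB:34:12', ['zB:12', 'UB:34:12'])
print(r.zrevrange('zfUB:34:12', 0, -1))  # newest unread hash first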
@@ -849,31 +851,40 @@ class MSocialSubscription(mongo.Document):
         if not isinstance(social_user_ids, list):
             social_user_ids = [social_user_ids]

-        unread_ranked_stories_keys  = 'zU:%s:social' % (user_id)
-        if offset and r.exists(unread_ranked_stories_keys):
-            story_hashes = range_func(unread_ranked_stories_keys, offset, limit, withscores=True)
+        ranked_stories_keys         = 'zU:%s:social' % (user_id)
+        unread_ranked_stories_keys  = 'zfU:%s:social' % (user_id)
+        unread_story_hashes = cache.get(unread_ranked_stories_keys)
+        if offset and r.exists(ranked_stories_keys) and unread_story_hashes:
+            story_hashes = range_func(ranked_stories_keys, offset, limit, withscores=True)
             if story_hashes:
-                return zip(*story_hashes)
+                story_hashes, story_dates = zip(*story_hashes)
+                return story_hashes, story_dates, unread_story_hashes
             else:
-                return [], []
+                return [], [], {}
         else:
-            r.delete(unread_ranked_stories_keys)
+            r.delete(ranked_stories_keys)
+            cache.delete(unread_ranked_stories_keys)

+        unread_feed_story_hashes = {}
         for social_user_id in social_user_ids:
             us = cls.objects.get(user_id=relative_user_id, subscription_user_id=social_user_id)
             story_hashes = us.get_stories(offset=0, limit=100,
                                           order=order, read_filter=read_filter,
                                           withscores=True, everything_unread=everything_unread)
+            unread_feed_story_hashes[social_user_id] = us.get_stories(read_filter='unread', limit=500,
+                                                                      fetch_stories=False)
             if story_hashes:
-                r.zadd(unread_ranked_stories_keys, **dict(story_hashes))
+                r.zadd(ranked_stories_keys, **dict(story_hashes))

-        story_hashes = range_func(unread_ranked_stories_keys, offset, limit, withscores=True)
-        r.expire(unread_ranked_stories_keys, 24*60*60)
+        story_hashes = range_func(ranked_stories_keys, offset, limit, withscores=True)
+        r.expire(ranked_stories_keys, 24*60*60)
+        cache.set(unread_ranked_stories_keys, unread_feed_story_hashes, 24*60*60)

         if story_hashes:
-            return zip(*story_hashes)
+            story_hashes, story_dates = zip(*story_hashes)
+            return story_hashes, story_dates, unread_feed_story_hashes
         else:
-            return [], []
+            return [], [], {}

     def mark_story_ids_as_read(self, story_ids, feed_id=None, mark_all_read=False, request=None):
         data = dict(code=0, payload=story_ids)
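
feed_stories now memoizes two things for 24 hours: the merged ranked zset in Redis (ranked_stories_keys) and the per-subscription dict of unread hashes in Django's cache under the 'zf' key, so later pages (offset > 0) reuse both instead of re-reading every subscription. A minimal cache-aside sketch of that shape, assuming a configured Django CACHES backend; unread_hashes_for_page and rebuild are hypothetical:

from django.core.cache import cache

def unread_hashes_for_page(user_id, offset, rebuild):
    key = 'zfU:%s:social' % user_id
    unread = cache.get(key)
    if offset and unread is not None:
        return unread              # later pages: reuse the memoized dict
    unread = rebuild()             # first page or expired: recompute
    cache.set(key, unread, 24*60*60)
    return unread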

apps/social/views.py

@@ -177,7 +177,6 @@ def load_river_blurblog(request):
     relative_user_id = request.REQUEST.get('relative_user_id', None)
     global_feed = request.REQUEST.get('global_feed', None)
     now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
-    UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)

     if global_feed:
         global_user = User.objects.get(username='popular')
@@ -193,11 +192,12 @@ def load_river_blurblog(request):
     offset = (page-1) * limit
     limit = page * limit - 1

-    story_hashes, story_dates = MSocialSubscription.feed_stories(user.pk, social_user_ids,
-                                                                 offset=offset, limit=limit,
-                                                                 order=order, read_filter=read_filter,
-                                                                 relative_user_id=relative_user_id,
-                                                                 everything_unread=global_feed)
+    story_hashes, story_dates, unread_feed_story_hashes = MSocialSubscription.feed_stories(
+                                                                 user.pk, social_user_ids,
+                                                                 offset=offset, limit=limit,
+                                                                 order=order, read_filter=read_filter,
+                                                                 relative_user_id=relative_user_id,
+                                                                 everything_unread=global_feed)
     mstories = MStory.find_by_story_hashes(story_hashes)
     story_hashes_to_dates = dict(zip(story_hashes, story_dates))
     def sort_stories_by_hash(a, b):
@@ -222,7 +222,6 @@ def load_river_blurblog(request):
         unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids)
         unsub_feeds = [feed.canonical(include_favicon=False) for feed in unsub_feeds]

-    # Find starred stories
     if story_feed_ids:
         story_hashes = [story['story_hash'] for story in stories]
         starred_stories = MStarredStory.objects(
@@ -236,17 +235,10 @@ def load_river_blurblog(request):
                                          .only('story_hash', 'shared_date', 'comments')
         shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
                                                        comments=story.comments))
                                for story in shared_stories])
-        userstories_db = MUserStory.objects(user_id=user.pk,
-                                            feed_id__in=story_feed_ids,
-                                            story_id__in=story_ids).only('story_id')
-        userstories = set(us.story_id for us in userstories_db)
     else:
         starred_stories = {}
         shared_stories = {}
-        userstories = []

     # Intelligence classifiers for all feeds involved
     if story_feed_ids:
@@ -268,12 +260,12 @@ def load_river_blurblog(request):
     # Just need to format stories
     for story in stories:
-        if story['id'] in userstories:
-            story['read_status'] = 1
-        elif story['story_date'] < UNREAD_CUTOFF:
-            story['read_status'] = 1
-        else:
-            story['read_status'] = 0
+        story['read_status'] = 1
+        print unread_feed_story_hashes
+        for social_user_id in unread_feed_story_hashes.keys():
+            if story['story_hash'] in unread_feed_story_hashes[social_user_id]:
+                story['read_status'] = 0
+                break
         story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
         story['short_parsed_date'] = format_story_link_date__short(story_date, now)
         story['long_parsed_date'] = format_story_link_date__long(story_date, now)
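
The rewritten loop derives read state entirely from the returned unread sets: each story defaults to read, and flips to unread the first time its hash appears in any followed user's set. The same logic as a self-contained sketch with made-up data:

# Standalone sketch of the read-state pass above (hypothetical data).
unread_feed_story_hashes = {
    12: ['1:aaaaaa', '1:cccccc'],   # social_user_id -> unread story hashes
    99: ['7:dddddd'],
}
stories = [{'story_hash': '1:aaaaaa'}, {'story_hash': '1:bbbbbb'}]

for story in stories:
    story['read_status'] = 1                 # default: read
    for hashes in unread_feed_story_hashes.values():
        if story['story_hash'] in hashes:
            story['read_status'] = 0         # unread in some subscription
            break

assert [s['read_status'] for s in stories] == [0, 1]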