Mirror of https://github.com/samuelclay/NewsBlur.git, synced 2025-09-18 21:50:56 +00:00
Major optimization to both single feed loads and river loads. Both now check training only for trained feeds, and the river makes a single subscription call instead of N subscription calls.
This commit is contained in:
parent b8e6ff4a70
commit e62b058638
2 changed files with 24 additions and 14 deletions
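
The heart of the river change is replacing an N+1 query pattern with one batched subscription lookup. A minimal before/after sketch using the names from the diff below (story fetching elided):

    # Before: one ORM round trip per feed in the river.
    for feed_id in feed_ids:
        try:
            us = UserSubscription.objects.get(user=user_id, feed=feed_id)
        except UserSubscription.DoesNotExist:
            continue
        # ... fetch story hashes through us ...

    # After: one query for all feeds, then O(1) dict lookups per feed.
    usersubs = UserSubscription.objects.filter(user=user_id, feed__in=feed_ids)
    usersubs = dict((sub.feed_id, sub) for sub in usersubs)
    for feed_id in feed_ids:
        if feed_id not in usersubs:
            continue
        us = usersubs[feed_id]
        # ... fetch story hashes through us ...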
apps/reader/models.py

@@ -100,7 +100,7 @@ class UserSubscription(models.Model):
         else:
             r.delete(unread_ranked_stories_key)
             if not r.exists(stories_key):
-                print " ---> No stories on feed: %s" % self
+                # print " ---> No stories on feed: %s" % self
                 return []
             elif read_filter != 'unread' or not r.exists(read_stories_key):
                 ignore_user_stories = True
@@ -157,7 +157,8 @@ class UserSubscription(models.Model):
             return []
     
     @classmethod
-    def feed_stories(cls, user_id, feed_ids, offset=0, limit=6, order='newest', read_filter='all'):
+    def feed_stories(cls, user_id, feed_ids, offset=0, limit=6, order='newest', read_filter='all',
+                     usersubs=None):
         r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
         
         if order == 'oldest':
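
Because the new usersubs parameter defaults to None, every existing caller of feed_stories() keeps working unchanged; only callers that already hold the subscriptions opt in. Both call styles against the new signature:

    # Old style: feed_stories() fetches the subscriptions itself.
    UserSubscription.feed_stories(user.pk, feed_ids, offset=0, limit=6)

    # New style: pass subscriptions you already fetched, skipping the extra query.
    subs = UserSubscription.objects.filter(user=user, feed__in=feed_ids)
    UserSubscription.feed_stories(user.pk, feed_ids, offset=0, limit=6,
                                  usersubs=subs)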
@@ -178,11 +179,15 @@ class UserSubscription(models.Model):
             r.delete(ranked_stories_keys)
             cache.delete(unread_ranked_stories_keys)
         
+        if not usersubs:
+            usersubs = cls.objects.filter(user=user_id, feed__in=feed_ids)
+        usersubs = dict((sub.feed_id, sub) for sub in usersubs)
+        
         unread_feed_story_hashes = {}
         for feed_id in feed_ids:
-            try:
-                us = cls.objects.get(user=user_id, feed=feed_id)
-            except cls.DoesNotExist:
+            if feed_id in usersubs:
+                us = usersubs[feed_id]
+            else:
                 continue
             story_hashes = us.get_stories(offset=0, limit=200,
                                           order=order, read_filter=read_filter,
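
Whether usersubs arrives prefetched or is queried here, it is normalized into a feed_id-to-subscription dict, so feeds the user is not subscribed to are skipped with a continue instead of raising DoesNotExist. The if/else lookup in the hunk is equivalent to this compact form:

    us = usersubs.get(feed_id)
    if us is None:
        continue  # user has no subscription to this feed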
apps/reader/views.py

@@ -786,8 +786,10 @@ def load_river_stories__redis(request):
     now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
     
     if not feed_ids and not story_hashes:
-        usersubs = UserSubscription.objects.filter(user=user, active=True).only('feed')
+        usersubs = UserSubscription.objects.filter(user=user, active=True)
         feed_ids = [sub.feed_id for sub in usersubs]
+    else:
+        usersubs = UserSubscription.objects.filter(user=user, active=True, feed__in=feed_ids)
     
     offset = (page-1) * limit
     limit = page * limit - 1
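
Note the dropped .only('feed') in the first branch: the subscriptions are no longer just a source of feed ids, since is_trained is read from them later in the view. With Django's deferred loading, keeping .only('feed') would have cost one extra query per row:

    # Hypothetical regression if .only('feed') had been kept:
    usersubs = UserSubscription.objects.filter(user=user, active=True).only('feed')
    trained = [sub.feed_id for sub in usersubs if sub.is_trained]
    # Each is_trained access loads the deferred field with its own query.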
@@ -800,11 +802,14 @@ def load_river_stories__redis(request):
     story_hashes, unread_feed_story_hashes = UserSubscription.feed_stories(user.pk, feed_ids,
                                                                            offset=offset, limit=limit,
                                                                            order=order,
-                                                                           read_filter=read_filter)
+                                                                           read_filter=read_filter,
+                                                                           usersubs=usersubs)
     mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order)
     stories = Feed.format_stories(mstories)
     found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
     stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk)
+    trained_feed_ids = [sub.feed_id for sub in usersubs if sub.is_trained]
+    found_trained_feed_ids = list(set(trained_feed_ids) & set(found_feed_ids))
     
     # Find starred stories
     if found_feed_ids:
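
Narrowing to trained feeds is a set intersection between the user's trained subscriptions and the feeds that actually produced stories on this page. A worked example with made-up ids:

    trained_feed_ids = [3, 9, 42]        # subscriptions with is_trained set
    found_feed_ids = [3, 7, 42, 88]      # feeds present in the loaded stories
    found_trained_feed_ids = list(set(trained_feed_ids) & set(found_feed_ids))
    # -> [3, 42] (set order is arbitrary)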
@@ -818,15 +823,15 @@ def load_river_stories__redis(request):
         starred_stories = {}
     
     # Intelligence classifiers for all feeds involved
-    if found_feed_ids:
+    if found_trained_feed_ids:
         classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk,
-                                                        feed_id__in=found_feed_ids))
+                                                        feed_id__in=found_trained_feed_ids))
         classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk,
-                                                            feed_id__in=found_feed_ids))
+                                                            feed_id__in=found_trained_feed_ids))
         classifier_titles = list(MClassifierTitle.objects(user_id=user.pk,
-                                                          feed_id__in=found_feed_ids))
+                                                          feed_id__in=found_trained_feed_ids))
         classifier_tags = list(MClassifierTag.objects(user_id=user.pk,
-                                                      feed_id__in=found_feed_ids))
+                                                      feed_id__in=found_trained_feed_ids))
     else:
         classifier_feeds = []
         classifier_authors = []
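
The payoff of switching the guard to found_trained_feed_ids: a page of stories containing no trained feeds now skips all four MClassifier* queries instead of running them against every found feed. Reduced to its skeleton, the guard-with-empty-default pattern looks like this (one classifier shown; the hunk repeats it for authors, titles, and tags):

    if found_trained_feed_ids:
        classifier_feeds = list(MClassifierFeed.objects(
            user_id=user.pk, feed_id__in=found_trained_feed_ids))
    else:
        classifier_feeds = []  # nothing trained, nothing to classify against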