Refactoring story_hashes group_by_feed in prep for removing .get_stories().

Samuel Clay 2022-06-23 15:35:20 -04:00
parent 8aff53c2ba
commit 6792762e27
2 changed files with 12 additions and 11 deletions
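
Context for the diff below: group_by_feed controls the shape of story_hashes()'s return value, and this commit flips its default from True to False, letting most call sites drop an explicit group_by_feed=False. A minimal sketch of the two shapes, assuming the flag only changes how the same per-feed results are packaged (package_hashes is a hypothetical helper; the real method pulls these from Redis):

# Sketch only: assumed return shapes of story_hashes() for the two values
# of group_by_feed; hash strings and packaging are inferred, not verified.
from typing import Dict, List, Union

def package_hashes(per_feed: Dict[int, List[str]],
                   group_by_feed: bool) -> Union[Dict[int, List[str]], List[str]]:
    if group_by_feed:
        # Grouped: {feed_id: ["<feed_id>:<hash>", ...], ...}
        return dict(per_feed)
    # Flat: one list of "<feed_id>:<hash>" strings across all feeds
    flat: List[str] = []
    for hashes in per_feed.values():
        flat.extend(hashes)
    return flat

per_feed = {1: ["1:abc123"], 2: ["2:def456", "2:aaa111"]}
assert package_hashes(per_feed, group_by_feed=True) == per_feed
assert package_hashes(per_feed, group_by_feed=False) == ["1:abc123", "2:def456", "2:aaa111"]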

apps/reader/models.py

@@ -111,7 +111,7 @@ class UserSubscription(models.Model):
     @classmethod
     def story_hashes(cls, user_id, feed_ids=None, usersubs=None, read_filter="unread", order="newest",
-                     include_timestamps=False, group_by_feed=True, cutoff_date=None,
+                     include_timestamps=False, group_by_feed=False, cutoff_date=None,
                      across_all_feeds=True):
         r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
         pipeline = r.pipeline()
@@ -362,10 +362,10 @@ class UserSubscription(models.Model):
         story_hashes = cls.story_hashes(user_id, feed_ids=feed_ids,
                                         read_filter=read_filter, order=order,
                                         include_timestamps=True,
-                                        group_by_feed=False,
                                         usersubs=usersubs,
                                         cutoff_date=cutoff_date,
                                         across_all_feeds=across_all_feeds)
         if not story_hashes:
             return [], []
@@ -382,7 +382,6 @@ class UserSubscription(models.Model):
         unread_story_hashes = cls.story_hashes(user_id, feed_ids=feed_ids,
                                                read_filter="unread", order=order,
                                                include_timestamps=True,
-                                               group_by_feed=False,
                                                cutoff_date=cutoff_date)
         if unread_story_hashes:
             for unread_story_hash_group in chunks(unread_story_hashes, 100):
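
The chunks() call above batches the unread hashes so each follow-up query handles at most 100 of them. A stand-in with the behavior this call site implies (the real helper lives elsewhere in the codebase):

# Stand-in for the chunks() generator used above: fixed-size slices of a list,
# so downstream queries stay bounded at `size` story hashes apiece.
def chunks(items, size):
    for i in range(0, len(items), size):
        yield items[i:i + size]

assert [len(batch) for batch in chunks(list(range(250)), 100)] == [100, 100, 50]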
@@ -830,8 +829,9 @@ class UserSubscription(models.Model):
             return
         cutoff_date = cutoff_date - datetime.timedelta(seconds=1)
-        story_hashes = self.get_stories(limit=500, order="newest", cutoff_date=cutoff_date,
-                                        read_filter="unread", hashes_only=True)
+        story_hashes = UserSubscription.story_hashes(self.user.pk, feed_ids=[self.feed.pk],
+                                                     order="newest", read_filter="unread",
+                                                     cutoff_date=cutoff_date)
         data = self.mark_story_ids_as_read(story_hashes, aggregated=True)
         return data
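
One behavioral note on this hunk: the removed get_stories() call capped the result at limit=500, while the replacement story_hashes() call passes no limit, so the set is now bounded by cutoff_date alone. The one-second step-back just above the call is the only arithmetic involved; a self-contained sketch using stdlib datetime:

# Sketch of the cutoff adjustment made before the call: step back one second,
# presumably so stories timestamped exactly at cutoff_date land on the
# intended side of the unread comparison.
import datetime

cutoff_date = datetime.datetime(2022, 6, 23, 15, 35, 20)
cutoff_date = cutoff_date - datetime.timedelta(seconds=1)
assert cutoff_date == datetime.datetime(2022, 6, 23, 15, 35, 19)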
@@ -938,7 +938,7 @@ class UserSubscription(models.Model):
         unread_story_hashes = self.story_hashes(user_id=self.user_id, feed_ids=[self.feed_id],
                                                 usersubs=[self],
-                                                read_filter='unread', group_by_feed=False,
+                                                read_filter='unread',
                                                 cutoff_date=self.user.profile.unread_cutoff)
         if not stories:
@@ -1005,7 +1005,7 @@ class UserSubscription(models.Model):
         else:
             unread_story_hashes = self.story_hashes(user_id=self.user_id, feed_ids=[self.feed_id],
                                                     usersubs=[self],
-                                                    read_filter='unread', group_by_feed=False,
+                                                    read_filter='unread',
                                                     include_timestamps=True,
                                                     cutoff_date=date_delta)
@@ -1402,7 +1402,9 @@ class RUserStory:
     def switch_feed(cls, user_id, old_feed_id, new_feed_id):
         r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
         p = r.pipeline()
-        story_hashes = cls.get_stories(user_id, old_feed_id, r=r)
+        story_hashes = UserSubscription.story_hashes(user_id, feed_ids=[old_feed_id])
+        # story_hashes = cls.get_stories(user_id, old_feed_id, r=r)
         for story_hash in story_hashes:
             _, hash_story = MStory.split_story_hash(story_hash)
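
switch_feed re-keys each of the user's story hashes from the old feed to the new one, and the split_story_hash() call implies a "<feed_id>:<hash>" format. A minimal sketch of that re-keying, with split_story_hash reimplemented here as an assumption about the format (not the real MStory method):

# Sketch of the per-hash re-keying in switch_feed, assuming story hashes are
# "<feed_id>:<hash>" strings (stand-in for MStory.split_story_hash).
def split_story_hash(story_hash):
    feed_id, _, hash_story = story_hash.partition(":")
    return int(feed_id), hash_story

def rekey(story_hash, new_feed_id):
    _, hash_story = split_story_hash(story_hash)
    return "%s:%s" % (new_feed_id, hash_story)

assert rekey("42:6c9bd9", 99) == "99:6c9bd9"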

apps/reader/views.py

@@ -729,7 +729,6 @@ def load_single_feed(request, feed_id):
         unread_story_hashes = UserSubscription.story_hashes(user.pk, read_filter='unread',
                                                             feed_ids=[usersub.feed_id],
                                                             usersubs=[usersub],
-                                                            group_by_feed=False,
                                                             cutoff_date=user.profile.unread_cutoff)
     story_hashes = [story['story_hash'] for story in stories if story['story_hash']]
     starred_stories = MStarredStory.objects(user_id=user.pk,
@@ -1427,7 +1426,6 @@ def load_river_stories__redis(request):
             mstories = stories
             unread_feed_story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids,
                                                                      read_filter="unread", order=order,
-                                                                     group_by_feed=False,
                                                                      cutoff_date=user.profile.unread_cutoff)
         else:
             stories = []
@@ -1681,7 +1679,7 @@ def complete_river(request):
     if feed_ids:
         stories_truncated = UserSubscription.truncate_river(user.pk, feed_ids, read_filter, cache_prefix="dashboard:")
-    if page > 1:
+    if page >= 1:
         logging.user(request, "~FC~BBRiver complete on page ~SB%s~SN, truncating ~SB%s~SN stories from ~SB%s~SN feeds" % (page, stories_truncated, len(feed_ids)))
     return dict(code=1, message="Truncated %s stories from %s" % (stories_truncated, len(feed_ids)))
@@ -1738,6 +1736,7 @@ def unread_story_hashes(request):
     story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids,
                                                  order=order, read_filter=read_filter,
                                                  include_timestamps=include_timestamps,
+                                                 group_by_feed=True,
                                                  cutoff_date=user.profile.unread_cutoff)
     logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" %
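
With the default now False, this endpoint passes group_by_feed=True explicitly, presumably so its JSON payload keeps grouping hashes per feed. A sketch of the shape a client might then see, assuming include_timestamps pairs each hash with its epoch score (inferred, not checked against the actual serializer):

# Assumed grouped payload preserved by passing group_by_feed=True explicitly;
# with include_timestamps, each entry is presumably a (story_hash, epoch) pair.
grouped = {
    "1": [["1:abc123", 1655991320], ["1:def456", 1655994920]],
    "2": [["2:aaa111", 1655998520]],
}
assert sum(len(hashes) for hashes in grouped.values()) == 3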