Taking archive unreads into account in single feeds.
Commit a633c0983b (parent da1f2c477a)
4 changed files with 102 additions and 27 deletions
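For orientation before the diff: the commit works against two Redis sorted sets per user, scored by story timestamp. The key names below come straight from the diff; the client setup and sample values are illustrative only.

    import redis

    r = redis.Redis()  # stand-in; NewsBlur pulls connections from settings.REDIS_STORY_HASH_POOL

    user_id, feed_id = 42, 1764
    story_hash = f"{feed_id}:abc123"  # "<feed_id>:<hash>", the format MStory.split_story_hash expects
    story_date = 1640000000           # unix timestamp of the story

    # One zset across all of a user's feeds, one per feed; score = story timestamp.
    r.zadd(f"uU:{user_id}", {story_hash: story_date})
    r.zadd(f"uU:{user_id}:{feed_id}", {story_hash: story_date})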
@@ -162,11 +162,11 @@ class UserSubscription(models.Model):
             pipeline.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
             if order == 'oldest':
-                removed_min = pipeline.zremrangebyscore(unread_ranked_stories_key, 0, min_score-1)
-                removed_max = pipeline.zremrangebyscore(unread_ranked_stories_key, max_score+1, 2*max_score)
+                pipeline.zremrangebyscore(unread_ranked_stories_key, 0, min_score-1)
+                pipeline.zremrangebyscore(unread_ranked_stories_key, max_score+1, 2*max_score)
             else:
-                removed_min = pipeline.zremrangebyscore(unread_ranked_stories_key, 0, max_score-1)
-                removed_max = pipeline.zremrangebyscore(unread_ranked_stories_key, min_score+1, 2*min_score)
+                pipeline.zremrangebyscore(unread_ranked_stories_key, 0, max_score-1)
+                pipeline.zremrangebyscore(unread_ranked_stories_key, min_score+1, 2*min_score)
 
             if User.objects.get(pk=user_id).profile.is_archive:
                 user_unread_stories_feed_key = f"uU:{user_id}:{feed_id}"
@@ -176,8 +176,6 @@ class UserSubscription(models.Model):
                         min_score = int(oldest_unread[0][1])
                     else:
                         max_score = int(oldest_unread[0][1])
-                if settings.DEBUG:
-                    logging.debug(f"Oldest unread: {oldest_unread}, removed {removed_min} below and {removed_max} above")
 
                 pipeline.zunionstore(unread_ranked_stories_key, [unread_ranked_stories_key, user_unread_stories_feed_key], aggregate="MAX")
 
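A note on the zunionstore lines above: after the window trim, unioning the ranked set with the per-feed archive-unread set pulls back unread stories that fall outside the default window. A minimal sketch of the semantics with toy keys and scores, not NewsBlur's data:

    import redis

    r = redis.Redis()
    r.zadd("ranked", {"1:aaa": 300, "1:bbb": 400})    # stories left after the score-window trim
    r.zadd("uU:42:1", {"1:old": 100, "1:bbb": 400})   # archived unreads; one member overlaps

    # aggregate="MAX" keeps the larger score when a member is in both sets.
    r.zunionstore("ranked", ["ranked", "uU:42:1"], aggregate="MAX")
    print(r.zrange("ranked", 0, -1, withscores=True))
    # [(b'1:old', 100.0), (b'1:aaa', 300.0), (b'1:bbb', 400.0)]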
@@ -262,16 +260,13 @@ class UserSubscription(models.Model):
         r.delete(unread_stories_key)
 
         if self.user.profile.is_archive:
-            user_unread_stories_feed_key = f"uU:{self.user_id}:{self.feed_id}"
-            oldest_unread = r.zrevrange(user_unread_stories_feed_key, -1, -1, withscores=True)
+            oldest_unread = self.oldest_unread_story_date()
             if oldest_unread:
                 if order == 'oldest':
                     min_score = int(oldest_unread[0][1])
                 else:
                     max_score = int(oldest_unread[0][1])
-            if settings.DEBUG:
-                logging.debug(f"Oldest unread: {oldest_unread}, removed {removed_min} below and {removed_max} above")
+            user_unread_stories_feed_key = f"uU:{self.user_id}:{self.feed_id}"
 
             r.zunionstore(unread_ranked_stories_key, [unread_ranked_stories_key, user_unread_stories_feed_key], aggregate="MAX")
 
         # Weird encoding error on redis' part, where a DUMP causes an encoding
@@ -283,8 +278,16 @@ class UserSubscription(models.Model):
                 pipeline.restore(unread_ranked_stories_key, 1*60*60*1000, dump)
                 pipeline.execute()
                 r.delete(unread_ranked_stories_key)
+        else:
+            if self.user.profile.is_archive:
+                oldest_unread = self.oldest_unread_story_date()
+                if oldest_unread:
+                    if order == 'oldest':
+                        min_score = int(oldest_unread[0][1])
+                    else:
+                        max_score = int(oldest_unread[0][1])
+
-        if settings.DEBUG and False:
+        if settings.DEBUG:
             debug_stories = rt.zrevrange(unread_ranked_stories_key, 0, -1, withscores=True)
             print((" ---> Unread all stories (%s - %s) %s stories: %s" % (
                 min_score,
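The new else branch above widens the score window for archive accounts: oldest_unread_story_date() returns the lowest-scored entry of uU:<user_id>:<feed_id> as a [(hash, score)] list, and that timestamp replaces min_score (or max_score when reading newest-first). A worked sketch of the arithmetic, with made-up timestamps:

    min_score = 1656000000                      # e.g. the 30-day unread_cutoff
    oldest_unread = [(b"1:old", 1640000000.0)]  # shape returned by zrevrange(..., withscores=True)

    order = 'oldest'
    if order == 'oldest':
        min_score = int(oldest_unread[0][1])    # window now reaches back to 1640000000
    else:
        max_score = int(oldest_unread[0][1])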
@@ -386,6 +389,15 @@ class UserSubscription(models.Model):
         user = User.objects.get(pk=user_id)
         return user.profile.days_of_story_hashes
 
+    def oldest_unread_story_date(self, r=None):
+        if not r:
+            r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
+
+        user_unread_stories_feed_key = f"uU:{self.user_id}:{self.feed_id}"
+        oldest_unread = r.zrevrange(user_unread_stories_feed_key, -1, -1, withscores=True)
+
+        return oldest_unread
+
     @classmethod
     def truncate_river(cls, user_id, feed_ids, read_filter, cache_prefix=""):
         rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
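On the new helper: zrevrange(key, -1, -1, withscores=True) takes the last element in descending score order, i.e. the member with the smallest score, so it returns the oldest unread as a one-element list. A quick illustration with toy data:

    import redis

    r = redis.Redis()
    r.zadd("uU:42:1764", {"1764:aaa": 100, "1764:bbb": 200, "1764:ccc": 300})
    print(r.zrevrange("uU:42:1764", -1, -1, withscores=True))
    # [(b'1764:aaa', 100.0)] -> the oldest unread story and its timestamp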
@@ -747,6 +759,9 @@ class UserSubscription(models.Model):
         RUserStory.mark_read(self.user_id, self.feed_id, story_hash, aggregated=aggregated)
         r.publish(self.user.username, 'story:read:%s' % story_hash)
 
+        if self.user.profile.is_archive:
+            RUserUnreadStory.mark_read(self.user_id, self.feed_id, story_hash)
+
         r.publish(self.user.username, 'feed:%s' % self.feed_id)
 
         self.last_read_date = datetime.datetime.now()
@@ -763,7 +778,6 @@ class UserSubscription(models.Model):
         if self.user.profile.is_archive and story.story_date < self.user.profile.unread_cutoff:
             user_unread_story = RUserUnreadStory.mark_unread(
                 user_id=self.user_id,
-                feed_id=story.story_feed_id,
                 story_hash=story.story_hash,
                 story_date=story.story_date,
             )
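The feed_id keyword can be dropped because the story hash already encodes it: hashes are "<feed_id>:<hash>", which MStory.split_story_hash splits apart and the switch_feed code later reassembles. A plain-Python sketch of that convention (hypothetical helper mirroring the calls in the diff, not NewsBlur's implementation):

    def split_story_hash(story_hash):
        # "1764:abc123" -> ("1764", "abc123"); the real method lives on MStory
        feed_id, _, hashed = story_hash.partition(":")
        return feed_id, hashed

    feed_id, hash_story = split_story_hash("1764:abc123")
    new_story_hash = "%s:%s" % (99, hash_story)  # re-keyed onto feed 99: "99:abc123"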
@@ -958,6 +972,8 @@ class UserSubscription(models.Model):
         # Switch read stories
         RUserStory.switch_feed(user_id=self.user_id, old_feed_id=old_feed.pk,
                                new_feed_id=new_feed.pk)
+        RUserUnreadStory.switch_feed(user_id=self.user_id, old_feed_id=old_feed.pk,
+                                     new_feed_id=new_feed.pk)
 
         def switch_feed_for_classifier(model):
             duplicates = model.objects(feed_id=old_feed.pk, user_id=self.user_id)
@@ -1785,16 +1801,73 @@ class RUserUnreadStory:
     """
 
     @classmethod
-    def mark_unread(cls, user_id, feed_id, story_hash, story_date):
+    def mark_unread(cls, user_id, story_hash, story_date, r=None):
+        if not r:
+            r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
+        if isinstance(story_date, float):
+            story_date = int(story_date)
+        if not isinstance(story_date, int):
+            story_date = int(time.mktime(story_date.timetuple()))
+
+        feed_id, _ = MStory.split_story_hash(story_hash)
         user_unread_stories_key = f"uU:{user_id}"
         user_unread_stories_feed_key = f"uU:{user_id}:{feed_id}"
+
+        r.zadd(user_unread_stories_key, {story_hash: story_date})
+        r.zadd(user_unread_stories_feed_key, {story_hash: story_date})
+
+    @classmethod
+    def mark_read(cls, user_id, story_hashes, r=None):
+        if not isinstance(story_hashes, list):
+            story_hashes = [story_hashes]
+        if not r:
             r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
-        r.zadd(user_unread_stories_key, {story_hash: time.mktime(story_date.timetuple())})
-        r.zadd(user_unread_stories_feed_key, {story_hash: time.mktime(story_date.timetuple())})
+
+        pipeline = r.pipeline()
+        for story_hash in story_hashes:
+            feed_id, _ = MStory.split_story_hash(story_hash)
+
+            user_unread_stories_key = f"uU:{user_id}"
+            user_unread_stories_feed_key = f"uU:{user_id}:{feed_id}"
+
+            pipeline.zrem(user_unread_stories_key, story_hash)
+            pipeline.zrem(user_unread_stories_feed_key, story_hash)
+        pipeline.execute()
 
     @classmethod
     def unreads(cls, user_id, story_hash):
         if not isinstance(story_hash, list):
             story_hash = [story_hash]
 
         user_unread_stories = cls.objects.filter(user_id=user_id, story_hash__in=story_hash)
 
         return user_unread_stories
 
+    @staticmethod
+    def get_stories_and_dates(user_id, feed_id, r=None):
+        if not r:
+            r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
+
+        user_unread_stories_feed_key = f"uU:{user_id}:{feed_id}"
+        story_hashes = r.zrange(user_unread_stories_feed_key, 0, -1, withscores=True)
+
+        return story_hashes
+
+    @classmethod
+    def switch_feed(cls, user_id, old_feed_id, new_feed_id):
+        r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
+        p = r.pipeline()
+        story_hashes = cls.get_stories_and_dates(user_id, old_feed_id, r=r)
+
+        for (story_hash, story_timestamp) in story_hashes:
+            _, hash_story = MStory.split_story_hash(story_hash)
+            new_story_hash = "%s:%s" % (new_feed_id, hash_story)
+            read_feed_key = "RS:%s:%s" % (user_id, new_feed_id)
+            user_unread_stories_feed_key = f"uU:{user_id}:{new_feed_id}"
+            cls.mark_unread(user_id, new_story_hash, story_timestamp, r=p)
+
+        p.execute()
+
+        if len(story_hashes) > 0:
+            logging.info(" ---> %s archived unread stories" % len(story_hashes))
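Taken together, RUserUnreadStory keeps the pair of zsets in lockstep for every unread story older than the cutoff. A standalone sketch of the mark_unread/mark_read round trip against a local Redis (key names from the diff; ids and dates illustrative):

    import redis

    r = redis.Redis()
    user_id, story_hash, story_date = 42, "1764:abc123", 1640000000
    feed_id = story_hash.split(":")[0]

    # mark_unread: add to the all-feeds set and the per-feed set
    r.zadd(f"uU:{user_id}", {story_hash: story_date})
    r.zadd(f"uU:{user_id}:{feed_id}", {story_hash: story_date})

    # mark_read: remove from both, pipelined as in the new classmethod
    pipeline = r.pipeline()
    pipeline.zrem(f"uU:{user_id}", story_hash)
    pipeline.zrem(f"uU:{user_id}:{feed_id}", story_hash)
    pipeline.execute()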
@@ -37,7 +37,7 @@ from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds
 from apps.analyzer.models import apply_classifier_authors, apply_classifier_tags
 from apps.analyzer.models import get_classifiers_for_user, sort_classifiers_by_feed
 from apps.profile.models import Profile, MCustomStyling, MDashboardRiver
-from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory, Feature
+from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory, RUserUnreadStory, Feature
 from apps.reader.forms import SignupForm, LoginForm, FeatureForm
 from apps.rss_feeds.models import MFeedIcon, MStarredStoryCounts, MSavedSearch
 from apps.notifications.models import MUserFeedNotification
@@ -666,7 +666,7 @@ def load_single_feed(request, feed_id):
 
     if page > 200:
         logging.user(request, "~BR~FK~SBOver page 200 on single feed: %s" % page)
-        raise Http404
+        assert False
 
     if query:
         if user.profile.is_premium:
@@ -753,7 +753,7 @@ def load_single_feed(request, feed_id):
         story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
         if usersub:
             story['read_status'] = 1
-            if story['story_date'] < user.profile.unread_cutoff:
+            if not user.profile.is_archive and story['story_date'] < user.profile.unread_cutoff:
                 story['read_status'] = 1
             elif (read_filter == 'all' or query) and usersub:
                 story['read_status'] = 1 if story['story_hash'] not in unread_story_hashes else 0
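The changed condition above means stories older than unread_cutoff are only force-marked read for non-archive accounts; archive subscribers keep their real unread state. Distilled to a predicate (names mirror the view code; this is a paraphrase, not the actual function):

    def force_read_by_cutoff(is_archive, story_date, unread_cutoff):
        # before: story_date < unread_cutoff
        # after:  archive accounts are exempt from the cutoff
        return (not is_archive) and story_date < unread_cutoff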
@@ -1818,6 +1818,9 @@ def mark_story_hashes_as_read(request):
 
     feed_ids, friend_ids = RUserStory.mark_story_hashes_read(request.user.pk, story_hashes, username=request.user.username)
 
+    if request.user.profile.is_archive:
+        RUserUnreadStory.mark_read(request.user.pk, story_hashes)
+
     if friend_ids:
         socialsubs = MSocialSubscription.objects.filter(
             user_id=request.user.pk,
@@ -27,7 +27,6 @@ DEBUG = True
 # `./manage.py collectstatic` first. Turn this on for development so you can see
 # changes in your JS/CSS.
 DEBUG_ASSETS = False # Make sure to run `./manage.py collectstatic` first
-DEBUG_ASSETS = False # Make sure to run `./manage.py collectstatic` first
 DEBUG_ASSETS = True
 
 # DEBUG_QUERIES controls the output of the database query logs. Can be rather verbose
@@ -35,7 +34,7 @@ DEBUG_ASSETS = True
 # down verbosity.
 DEBUG_QUERIES = DEBUG
 DEBUG_QUERIES_SUMMARY_ONLY = True
-# DEBUG_QUERIES_SUMMARY_ONLY = False
+DEBUG_QUERIES_SUMMARY_ONLY = False
 
 MEDIA_URL = '/media/'
 IMAGES_URL = '/imageproxy'
@@ -94,8 +94,8 @@ def _mongodb_decode_wire_protocol(message):
     op = MONGO_OPS.get(opcode, 'unknown')
     zidx = 20
     collection_name_size = message[zidx:].find(b'\0')
-    collection_name = message[zidx:zidx+collection_name_size]
-    if b'.system.' in collection_name:
+    collection_name = message[zidx:zidx+collection_name_size].decode('utf-8')
+    if '.system.' in collection_name:
         return
     zidx += collection_name_size + 1
     skip, limit = struct.unpack('<ii', message[zidx:zidx+8])
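In this last hunk, decoding the collection name up front turns the rest of the parse into straight text handling, so the .system. check compares str against str instead of needing a bytes literal. A runnable sketch of the slice-and-decode the hunk touches, using a toy byte string rather than a real wire-protocol packet:

    import struct

    # fake tail of a message: 20 filler bytes, a NUL-terminated name, then skip/limit
    message = b"\x00" * 20 + b"db.stories\x00" + struct.pack("<ii", 0, 10)

    zidx = 20
    collection_name_size = message[zidx:].find(b"\0")
    collection_name = message[zidx:zidx + collection_name_size].decode("utf-8")
    assert ".system." not in collection_name
    zidx += collection_name_size + 1
    skip, limit = struct.unpack("<ii", message[zidx:zidx + 8])
    print(collection_name, skip, limit)  # db.stories 0 10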