mirror of
https://github.com/viq/NewsBlur.git
synced 2025-09-18 21:43:31 +00:00
Remove items before the mark_read_date. Paging is still broken, though.
This commit is contained in:
parent
b562446884
commit
f4114e038f
3 changed files with 72 additions and 10 deletions
|
@ -19,6 +19,7 @@ class UserSubscription(models.Model):
|
|||
are not accurate and need to be calculated with `self.calculate_feed_scores()`.
|
||||
"""
|
||||
UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
|
||||
|
||||
user = models.ForeignKey(User, related_name='subscriptions')
|
||||
feed = models.ForeignKey(Feed, related_name='subscribers')
|
||||
user_title = models.CharField(max_length=255, null=True, blank=True)
|
||||
|
@ -48,11 +49,14 @@ class UserSubscription(models.Model):
|
|||
|
||||
def mark_feed_read(self):
|
||||
now = datetime.datetime.utcnow()
|
||||
|
||||
# Use the latest story to get last read time.
|
||||
if MStory.objects(story_feed_id=self.feed.pk).first():
|
||||
latest_story_date = MStory.objects(story_feed_id=self.feed.pk).order_by('-story_date').only('story_date')[0]['story_date']\
|
||||
+ datetime.timedelta(minutes=1)
|
||||
else:
|
||||
latest_story_date = now
|
||||
|
||||
self.last_read_date = latest_story_date
|
||||
self.mark_read_date = latest_story_date
|
||||
self.unread_count_negative = 0
|
||||
|
@ -60,6 +64,8 @@ class UserSubscription(models.Model):
|
|||
self.unread_count_neutral = 0
|
||||
self.unread_count_updated = latest_story_date
|
||||
self.needs_unread_recalc = False
|
||||
MUserStory.delete_marked_as_read_stories(self.user.pk, self.feed.pk)
|
||||
|
||||
self.save()
|
||||
|
||||
def calculate_feed_scores(self, silent=False, stories_db=None):
|
||||
|
@ -158,6 +164,7 @@ class UserSubscription(models.Model):
|
|||
self.unread_count_positive = feed_scores['positive']
|
||||
self.unread_count_neutral = feed_scores['neutral']
|
||||
self.unread_count_negative = feed_scores['negative']
|
||||
self.unread_count_updated = datetime.datetime.now()
|
||||
self.needs_unread_recalc = False
|
||||
|
||||
self.save()
|
||||
|
@ -214,7 +221,14 @@ class MUserStory(mongo.Document):
|
|||
@classmethod
def delete_old_stories(cls, feed_id):
    """Purge this user's read-story records for `feed_id` that are older
    than the unread cutoff (stories past DAYS_OF_UNREAD can never show
    up as unread again, so the read markers are dead weight)."""
    UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
    # Query via `cls` (not the hard-coded MUserStory) so subclasses
    # delete their own documents; the duplicated legacy call is gone.
    cls.objects(feed_id=feed_id, read_date__lte=UNREAD_CUTOFF).delete()
|
||||
|
||||
@classmethod
def delete_marked_as_read_stories(cls, user_id, feed_id, mark_read_date=None):
    """Delete the read-story records for `user_id` on `feed_id` that fall
    at or before the feed's mark-as-read date.

    If `mark_read_date` is not supplied, it is looked up from the user's
    subscription to the feed.
    """
    if not mark_read_date:
        usersub = UserSubscription.objects.get(user__pk=user_id, feed__pk=feed_id)
        mark_read_date = usersub.mark_read_date
    # Bug fix: filter on the resolved `mark_read_date` — the original used
    # `usersub.mark_read_date`, which raised NameError (and ignored the
    # caller's value) whenever `mark_read_date` was passed in explicitly.
    cls.objects(user_id=user_id, feed_id=feed_id, read_date__lte=mark_read_date).delete()
|
||||
|
||||
|
||||
class UserSubscriptionFolders(models.Model):
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import datetime
|
||||
import time
|
||||
import random
|
||||
import zlib
|
||||
from django.shortcuts import render_to_response, get_object_or_404
|
||||
|
@ -13,7 +14,7 @@ from django.contrib.auth.models import User
|
|||
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404
|
||||
from django.conf import settings
|
||||
from django.core.mail import mail_admins
|
||||
from mongoengine.queryset import OperationError, Q
|
||||
from mongoengine.queryset import OperationError
|
||||
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
|
||||
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds, apply_classifier_authors, apply_classifier_tags
|
||||
from apps.analyzer.models import get_classifiers_for_user
|
||||
|
@ -28,6 +29,7 @@ from utils.user_functions import get_user, ajax_login_required
|
|||
from utils.feed_functions import fetch_address_from_page, relative_timesince
|
||||
from utils.story_functions import format_story_link_date__short
|
||||
from utils.story_functions import format_story_link_date__long
|
||||
from utils.story_functions import bunch
|
||||
from utils import log as logging
|
||||
from utils.timezones.utilities import localtime_for_timezone
|
||||
|
||||
|
@ -411,23 +413,36 @@ def load_river_stories(request):
|
|||
# if page: offset = limit * page
|
||||
if page: limit = limit * page - read_stories_count
|
||||
|
||||
# Subquery used to find all `MStory`s within the user_sub's Mark as Read date.
|
||||
def feed_qvalues(feed_id):
|
||||
feed = UserSubscription.objects.get(feed__pk=feed_id, user=user)
|
||||
return (feed_id, feed.mark_read_date)
|
||||
return (feed_id, int(time.mktime(feed.mark_read_date.timetuple())))
|
||||
feed_last_reads = dict(map(feed_qvalues, feed_ids))
|
||||
|
||||
# Read stories to exclude
|
||||
read_stories = MUserStory.objects(user_id=user.pk, feed_id__in=feed_ids).only('story')
|
||||
read_stories = [rs.story.id for rs in read_stories]
|
||||
|
||||
# Between excluding what's been read, and what's outside the mark_read date,
|
||||
# every single returned story is unread.
|
||||
# After excluding read stories, all that's left are stories
|
||||
# past the mark_read_date. Everything returned is guaranteed to be unread.
|
||||
mstories = MStory.objects(
|
||||
id__nin=read_stories,
|
||||
story_feed_id__in=feed_ids
|
||||
)[offset:offset+limit]
|
||||
stories = Feed.format_stories(mstories)
|
||||
).map_reduce("""function() {
|
||||
var feed_last_reads = %s;
|
||||
var d = feed_last_reads[this.story_feed_id];
|
||||
if (this.story_date.getTime()/1000 > d) {
|
||||
emit(this._id, this);
|
||||
}
|
||||
}""" % (json.encode(feed_last_reads),),
|
||||
"""function(key, values) {
|
||||
return values[0];
|
||||
}""")
|
||||
mstories = [story.value for story in mstories]
|
||||
stories = []
|
||||
for i, story in enumerate(mstories):
|
||||
if i >= offset + limit: break
|
||||
stories.append(bunch(story))
|
||||
stories = Feed.format_stories(stories)
|
||||
|
||||
starred_stories = MStarredStory.objects(
|
||||
user_id=user.pk,
|
||||
|
@ -435,7 +450,6 @@ def load_river_stories(request):
|
|||
).only('story_guid', 'starred_date')
|
||||
starred_stories = dict([(story.story_guid, story.starred_date)
|
||||
for story in starred_stories])
|
||||
|
||||
|
||||
for story in stories:
|
||||
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
|
||||
|
|
|
@ -44,4 +44,38 @@ def pre_process_story(entry):
|
|||
entry['link'] = urlquote(entry_link)
|
||||
if isinstance(entry.get('guid'), dict):
|
||||
entry['guid'] = unicode(entry['guid'])
|
||||
return entry
|
||||
return entry
|
||||
|
||||
class bunch(dict):
    """Dictionary whose items can also be read and written as attributes.

    Overloads `__getattr__` and `__setattr__` (typo `__getatr__` in the
    original docstring) so that, after initialisation, attribute access
    maps onto item access. Reading a missing key as an attribute returns
    `None` instead of raising `AttributeError`.
    """
    def __init__(self, indict=None, attribute=None):
        # Avoid the shared-mutable-default pitfall: build a fresh dict.
        if indict is None:
            indict = {}
        # Set any attributes here — before initialisation these remain
        # normal instance attributes (stored on self.__dict__).
        self.attribute = attribute
        dict.__init__(self, indict)
        self.__initialised = True
        # After initialisation, setting attributes is the same as setting an item.

    def __getattr__(self, item):
        """Maps values to attributes.

        Only called if there *isn't* an attribute with this name.
        """
        try:
            return self.__getitem__(item)
        except KeyError:
            # Deliberate: missing keys read as None rather than raising.
            return None

    def __setattr__(self, item, value):
        """Maps attributes to values.

        Only if we are initialised.
        """
        # `dict.has_key()` was removed in Python 3; the `in` operator is
        # the exact equivalent and also works on Python 2.
        if '_bunch__initialised' not in self.__dict__:
            # This test allows attributes to be set in the __init__ method.
            return dict.__setattr__(self, item, value)
        elif item in self.__dict__:
            # Any normal (pre-init) attributes are handled normally.
            dict.__setattr__(self, item, value)
        else:
            self.__setitem__(item, value)
|
Loading…
Add table
Reference in a new issue