diff --git a/apps/feed_import/models.py b/apps/feed_import/models.py
index 3a71797ed..e1c6cd10f 100644
--- a/apps/feed_import/models.py
+++ b/apps/feed_import/models.py
@@ -56,6 +56,8 @@ class OPMLImporter(Importer):
             if not hasattr(feed, 'title'):
                 setattr(feed, 'title', feed.htmlUrl)
             feed_address = urlnorm.normalize(feed.xmlUrl)
+            if len(feed_address) > 255:
+                continue
             feed_link = urlnorm.normalize(feed.htmlUrl)
             logging.info(' ---> \t%s - %s - %s' % (feed.title, feed_link, feed_address,))
             feed_data = dict(feed_address=feed_address, feed_link=feed_link, feed_title=feed.title)
@@ -121,6 +123,9 @@ class GoogleReaderImporter(Importer):
         feed_link = urlnorm.normalize(feed_link)
         feed_address = urlnorm.normalize(feed_address)
 
+        if len(feed_address) > 255:
+            return folders
+
         # See if it exists as a duplicate first
         duplicate_feed = DuplicateFeed.objects.filter(duplicate_address=feed_address)
         if duplicate_feed:
diff --git a/apps/rss_feeds/models.py b/apps/rss_feeds/models.py
index eab059362..671e3ef49 100644
--- a/apps/rss_feeds/models.py
+++ b/apps/rss_feeds/models.py
@@ -733,7 +733,7 @@ class FeedFetchHistory(models.Model):
             self.fetch_date,
             self.status_code,
             self.message,
-            self.exception[:50]
+            self.exception and self.exception[:50]
         )
 
 class PageFetchHistory(models.Model):
@@ -750,7 +750,7 @@ class PageFetchHistory(models.Model):
             self.fetch_date,
             self.status_code,
             self.message,
-            self.exception[:50]
+            self.exception and self.exception[:50]
         )
 
 class DuplicateFeed(models.Model):
diff --git a/templates/reader/feeds.xhtml b/templates/reader/feeds.xhtml
index 290218f4f..5ab9de4f4 100644
--- a/templates/reader/feeds.xhtml
+++ b/templates/reader/feeds.xhtml
@@ -185,6 +185,17 @@
+
+
+	<div>
+		<h5>Important information</h5>
+		<table>
+			<tr>
+				<th>Today, August 25th</th>
+				<td>I am aware that approximately 20% of all feeds are failing to fetch and parse. The fix will be coming sometime today or tomorrow.</td>
+			</tr>
+		</table>
+	</div>
 {% endif %}