From c20eed028c301b967d77ab429f121d3c0eee8037 Mon Sep 17 00:00:00 2001
From: Samuel Clay
Date: Wed, 22 Jun 2022 15:44:39 -0400
Subject: [PATCH] Attempting to impose longer time limits for archive fetch.

---
 apps/reader/models.py    | 6 ++++--
 newsblur_web/settings.py | 2 ++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/apps/reader/models.py b/apps/reader/models.py
index 9163a44c2..7a6bef0f7 100644
--- a/apps/reader/models.py
+++ b/apps/reader/models.py
@@ -580,7 +580,8 @@ class UserSubscription(models.Model):
     def schedule_fetch_archive_feeds_for_user(cls, user_id):
         from apps.profile.tasks import FetchArchiveFeedsForUser
         FetchArchiveFeedsForUser.apply_async(kwargs=dict(user_id=user_id),
-                                             queue='search_indexer')
+                                             queue='search_indexer',
+                                             time_limit=settings.MAX_SECONDS_COMPLETE_ARCHIVE_FETCH)
 
     # Should be run as a background task
     @classmethod
@@ -611,7 +612,8 @@ class UserSubscription(models.Model):
 
         search_chunks = [FetchArchiveFeedsChunk.s(feed_ids=feed_id_chunk,
                                                   user_id=user_id
-                                                  ).set(queue='search_indexer').set(time_limit=1500)
+                                                  ).set(queue='search_indexer')
+                                                  .set(time_limit=settings.MAX_SECONDS_ARCHIVE_FETCH_SINGLE_FEED)
                          for feed_id_chunk in feed_id_chunks]
         callback = FinishFetchArchiveFeeds.s(user_id=user_id,
                                              start_time=start_time,
diff --git a/newsblur_web/settings.py b/newsblur_web/settings.py
index 418008aa8..2c34d02ed 100644
--- a/newsblur_web/settings.py
+++ b/newsblur_web/settings.py
@@ -97,6 +97,8 @@ PAYPAL_TEST = False
 DATA_UPLOAD_MAX_MEMORY_SIZE = 5242880 # 5 MB
 FILE_UPLOAD_MAX_MEMORY_SIZE = 5242880 # 5 MB
 PROMETHEUS_EXPORT_MIGRATIONS = False
+MAX_SECONDS_COMPLETE_ARCHIVE_FETCH = 60 * 60 * 1 # 1 hour
+MAX_SECONDS_ARCHIVE_FETCH_SINGLE_FEED = 60 * 10 # 10 minutes
 
 # Uncomment below to force all feeds to store this many stories. Default is to cut
 # off at 25 stories for single subscriber non-premium feeds and 500 for popular feeds.
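
Note: the patch relies on Celery's per-call execution options, which can be
passed either directly to apply_async() or attached to a task signature via
.set(). Below is a minimal standalone sketch of that pattern, assuming a
Celery app named `app`, a Redis broker URL, and a hypothetical task
`fetch_archive`; none of these names come from the patch itself.

# Minimal sketch of per-call Celery time limits; illustrative names only.
from celery import Celery

app = Celery('sketch', broker='redis://localhost:6379/0')  # assumed broker URL

# Mirror the two constants the patch adds to settings.py.
MAX_SECONDS_COMPLETE_ARCHIVE_FETCH = 60 * 60 * 1  # 1 hour
MAX_SECONDS_ARCHIVE_FETCH_SINGLE_FEED = 60 * 10   # 10 minutes

@app.task
def fetch_archive(feed_id):
    # Placeholder body; the real task would fetch and index one feed's archive.
    return feed_id

# Option 1: pass the hard time limit as an execution option to apply_async(),
# as the patch does for FetchArchiveFeedsForUser. Once the limit elapses, the
# worker forcibly terminates the process running the task.
fetch_archive.apply_async(
    kwargs=dict(feed_id=42),
    queue='search_indexer',
    time_limit=MAX_SECONDS_COMPLETE_ARCHIVE_FETCH,
)

# Option 2: attach the same options to a signature with .set(), the form the
# patch uses when building its list of per-chunk signatures.
sig = fetch_archive.s(feed_id=42).set(
    queue='search_indexer',
    time_limit=MAX_SECONDS_ARCHIVE_FETCH_SINGLE_FEED,
)
sig.apply_async()

Since .set() returns the signature, chained calls such as the patch's
.set(queue=...).set(time_limit=...) are equivalent to passing both options in
a single .set() as above.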