# NewsBlur-viq/apps/rss_feeds/page_importer.py


import requests
import re
import traceback
import feedparser
import time
import urllib.request, urllib.error, urllib.parse
import http.client
import zlib
from django.contrib.sites.models import Site
from django.utils.encoding import smart_bytes
from mongoengine.queryset import NotUniqueError
from socket import error as SocketError
from boto.s3.key import Key
from django.conf import settings
from django.utils.text import compress_string as compress_string_with_gzip
from utils import log as logging
from apps.rss_feeds.models import MFeedPage
from utils.feed_functions import timelimit, TimeoutError
from OpenSSL.SSL import Error as OpenSSLError
from pyasn1.error import PyAsn1Error
from sentry_sdk import capture_exception, flush
# from utils.feed_functions import mail_feed_error_to_admin
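
# URI schemes and placeholder values that can never be fetched as a web page.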
BROKEN_PAGES = [
'tag:',
'info:',
'uuid:',
'urn:',
'[]',
]
# Also change in reader_utils.js.
BROKEN_PAGE_URLS = [
'nytimes.com',
'github.com',
'washingtonpost.com',
'stackoverflow.com',
'stackexchange.com',
'twitter.com',
'rankexploits',
'gamespot.com',
'espn.com',
'royalroad.com',
]
class PageImporter(object):
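    """Fetches the original HTML page behind a feed (or a single story permalink),
    rewrites its URLs against the feed link, and stores it in Mongo, on S3, or on
    the original-page node."""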
def __init__(self, feed, story=None, request=None):
self.feed = feed
self.story = story
self.request = request
@property
def headers(self):
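        """Build the User-Agent header sent with page fetches, including the feed's
        subscriber count, permalink, and fake_user_agent."""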
return {
'User-Agent': 'NewsBlur Page Fetcher - %s subscriber%s - %s %s' % (
self.feed.num_subscribers,
's' if self.feed.num_subscribers != 1 else '',
self.feed.permalink,
self.feed.fake_user_agent,
),
}
def fetch_page(self, urllib_fallback=False, requests_exception=None):
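        """Fetch the feed's original page, logging and swallowing timeouts from the
        10-second limit on fetch_page_timeout."""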
try:
self.fetch_page_timeout(urllib_fallback=urllib_fallback, requests_exception=requests_exception)
except TimeoutError:
logging.user(self.request, ' ***> [%-30s] ~FBPage fetch ~SN~FRfailed~FB due to timeout' % (self.feed.log_title[:30]))
@timelimit(10)
def fetch_page_timeout(self, urllib_fallback=False, requests_exception=None):
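        """Fetch the feed_link with requests (or urllib on fallback), rewrite the HTML,
        and save it. Known-broken schemes and domains are skipped outright."""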
html = None
feed_link = self.feed.feed_link
if not feed_link:
self.save_no_page(reason="No feed link")
return
if feed_link.startswith('www'):
self.feed.feed_link = 'http://' + feed_link
try:
if any(feed_link.startswith(s) for s in BROKEN_PAGES):
self.save_no_page(reason="Broken page")
return
elif any(s in feed_link.lower() for s in BROKEN_PAGE_URLS):
self.save_no_page(reason="Broke page url")
return
elif feed_link.startswith('http'):
if urllib_fallback:
request = urllib.request.Request(feed_link, headers=self.headers)
response = urllib.request.urlopen(request)
time.sleep(0.01) # Grrr, GIL.
data = response.read().decode(response.headers.get_content_charset() or 'utf-8')
else:
try:
response = requests.get(feed_link, headers=self.headers, timeout=10)
response.connection.close()
except requests.exceptions.TooManyRedirects:
response = requests.get(feed_link, timeout=10)
except (AttributeError, SocketError, OpenSSLError, PyAsn1Error, TypeError,
requests.adapters.ReadTimeout) as e:
logging.debug(' ***> [%-30s] Page fetch failed using requests: %s' % (self.feed.log_title[:30], e))
self.save_no_page(reason="Page fetch failed")
return
data = response.text
if response.encoding and response.encoding.lower() != 'utf-8':
logging.debug(f" -> ~FBEncoding is {response.encoding}, re-encoding...")
try:
data = data.encode('utf-8').decode('utf-8')
except (LookupError, UnicodeEncodeError):
logging.debug(f" -> ~FRRe-encoding failed!")
pass
else:
try:
data = open(feed_link, 'r').read()
except IOError:
self.feed.feed_link = 'http://' + feed_link
self.fetch_page(urllib_fallback=True)
return
if data:
html = self.rewrite_page(data)
if html:
self.save_page(html)
else:
self.save_no_page(reason="No HTML found")
return
else:
self.save_no_page(reason="No data found")
return
except (ValueError, urllib.error.URLError, http.client.BadStatusLine, http.client.InvalidURL,
requests.exceptions.ConnectionError) as e:
self.feed.save_page_history(401, "Bad URL", e)
try:
fp = feedparser.parse(self.feed.feed_address)
except (urllib.error.HTTPError, urllib.error.URLError) as e:
return html
feed_link = fp.feed.get('link', "")
self.feed.save()
logging.debug(' ***> [%-30s] Page fetch failed: %s' % (self.feed.log_title[:30], e))
except (urllib.error.HTTPError) as e:
self.feed.save_page_history(e.code, e.msg, e.fp.read())
except (http.client.IncompleteRead) as e:
self.feed.save_page_history(500, "IncompleteRead", e)
except (requests.exceptions.RequestException,
requests.packages.urllib3.exceptions.HTTPError) as e:
logging.debug(' ***> [%-30s] Page fetch failed using requests: %s' % (self.feed.log_title[:30], e))
# mail_feed_error_to_admin(self.feed, e, local_vars=locals())
return self.fetch_page(urllib_fallback=True, requests_exception=e)
except Exception as e:
logging.debug('[%d] ! -------------------------' % (self.feed.id,))
tb = traceback.format_exc()
logging.debug(tb)
logging.debug('[%d] ! -------------------------' % (self.feed.id,))
self.feed.save_page_history(500, "Error", tb)
# mail_feed_error_to_admin(self.feed, e, local_vars=locals())
if (not settings.DEBUG and hasattr(settings, 'SENTRY_DSN') and
settings.SENTRY_DSN):
capture_exception(e)
flush()
if not urllib_fallback:
self.fetch_page(urllib_fallback=True)
else:
self.feed.save_page_history(200, "OK")
return html
def fetch_story(self):
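        """Fetch the original page for self.story, returning its rewritten HTML, or
        None on timeout or too many redirects."""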
html = None
try:
html = self._fetch_story()
except TimeoutError:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal story~FY: timed out")
except requests.exceptions.TooManyRedirects:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal story~FY: too many redirects")
return html
@timelimit(10)
def _fetch_story(self):
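        """Download the story permalink (retrying once without custom headers),
        normalize non-breaking spaces, rewrite the HTML, and save it on the story."""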
html = None
story_permalink = self.story.story_permalink
if not self.feed:
return
if any(story_permalink.startswith(s) for s in BROKEN_PAGES):
return
if any(s in story_permalink.lower() for s in BROKEN_PAGE_URLS):
return
if not story_permalink.startswith('http'):
return
try:
response = requests.get(story_permalink, headers=self.headers, timeout=10)
response.connection.close()
except (AttributeError, SocketError, OpenSSLError, PyAsn1Error,
requests.exceptions.ConnectionError,
requests.exceptions.TooManyRedirects,
requests.adapters.ReadTimeout) as e:
try:
response = requests.get(story_permalink, timeout=10)
except (AttributeError, SocketError, OpenSSLError, PyAsn1Error,
requests.exceptions.ConnectionError,
requests.exceptions.TooManyRedirects,
requests.adapters.ReadTimeout) as e:
logging.debug(' ***> [%-30s] Original story fetch failed using requests: %s' % (self.feed.log_title[:30], e))
return
# try:
data = response.text
# except (LookupError, TypeError):
# data = response.content
# import pdb; pdb.set_trace()
if response.encoding and response.encoding.lower() != 'utf-8':
logging.debug(f" -> ~FBEncoding is {response.encoding}, re-encoding...")
try:
data = data.encode('utf-8').decode('utf-8')
except (LookupError, UnicodeEncodeError):
logging.debug(f" -> ~FRRe-encoding failed!")
pass
if data:
data = data.replace("\xc2\xa0", " ") # Non-breaking space, is mangled when encoding is not utf-8
data = data.replace("\\u00a0", " ") # Non-breaking space, is mangled when encoding is not utf-8
html = self.rewrite_page(data)
if not html:
return
self.save_story(html)
return html
def save_story(self, html):
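        """Store the rewritten HTML on the story, zlib-compressed, as original_page_z."""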
self.story.original_page_z = zlib.compress(smart_bytes(html))
try:
self.story.save()
except NotUniqueError:
pass
def save_no_page(self, reason=None):
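        """Mark the feed as having no original page and record a 404 in its page history."""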
logging.debug(' ---> [%-30s] ~FYNo original page: %s / %s' % (self.feed.log_title[:30], reason, self.feed.feed_link))
self.feed.has_page = False
self.feed.save()
self.feed.save_page_history(404, "Feed has no original page.")
def rewrite_page(self, response):
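        """Insert a <base href="..."> pointing at the feed link so relative URLs in the
        saved page resolve against the original site."""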
BASE_RE = re.compile(r'<head(.*?)>', re.I)
base_code = '<base href="%s" />' % (self.feed.feed_link,)
        html = BASE_RE.sub(r'<head\1> ' + base_code, response)
if '<base href' not in html:
html = "%s %s" % (base_code, html)
# html = self.fix_urls(html)
return html.strip()
def fix_urls(self, document):
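        """Rewrite relative href/src URLs to absolute ones based on the feed link.
        Currently unused; see the commented-out call in rewrite_page."""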
# BEWARE: This will rewrite URLs inside of <script> tags. You know, like
# Google Analytics. Ugh.
FIND_RE = re.compile(r'\b(href|src)\s*=\s*("[^"]*"|\'[^\']*\'|[^"\'<>=\s]+)')
ret = []
last_end = 0
for match in FIND_RE.finditer(document):
url = match.group(2)
if url[0] in "\"'":
url = url.strip(url[0])
parsed = urllib.parse.urlparse(url)
            if parsed.scheme == parsed.netloc == '':  # relative to domain
url = urllib.parse.urljoin(self.feed.feed_link, url)
ret.append(document[last_end:match.start(2)])
ret.append('"%s"' % (url,))
last_end = match.end(2)
ret.append(document[last_end:])
return ''.join(ret)
def save_page(self, html):
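        """Persist the page: try the original-page node first, then S3, then fall back
        to a zlib-compressed copy in MFeedPage. Pages under 100 characters are ignored."""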
saved = False
if not html or len(html) < 100:
return
if settings.BACKED_BY_AWS.get('pages_on_node'):
saved = self.save_page_node(html)
if saved and self.feed.s3_page and settings.BACKED_BY_AWS.get('pages_on_s3'):
self.delete_page_s3()
if settings.BACKED_BY_AWS.get('pages_on_s3') and not saved:
saved = self.save_page_s3(html)
if not saved:
try:
feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
# feed_page.page_data = html.encode('utf-8')
if feed_page.page() == html:
logging.debug(' ---> [%-30s] ~FYNo change in page data: %s' % (self.feed.log_title[:30], self.feed.feed_link))
else:
# logging.debug(' ---> [%-30s] ~FYChange in page data: %s (%s/%s %s/%s)' % (self.feed.log_title[:30], self.feed.feed_link, type(html), type(feed_page.page()), len(html), len(feed_page.page())))
feed_page.page_data = zlib.compress(smart_bytes(html))
feed_page.save()
except MFeedPage.DoesNotExist:
feed_page = MFeedPage.objects.create(feed_id=self.feed.pk,
page_data=zlib.compress(smart_bytes(html)))
return feed_page
def save_page_node(self, html):
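        """POST the zlib-compressed page to the original_page endpoint on the current
        site; returns True on a 200 response, logging the failure otherwise."""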
domain = Site.objects.get_current().domain
url = "https://%s/original_page/%s" % (
domain,
self.feed.pk,
)
compressed_html = zlib.compress(smart_bytes(html))
response = requests.post(url, files={
'original_page': compressed_html,
# 'original_page': html,
})
if response.status_code == 200:
return True
else:
logging.debug(' ---> [%-30s] ~FRFailed to save page to node: %s (%s bytes)' % (self.feed.log_title[:30], response.status_code, len(compressed_html)))
def save_page_s3(self, html):
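        """Upload the gzipped page to the S3 pages bucket, drop any copy in MFeedPage,
        and flag the feed as having its page on S3."""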
k = Key(settings.S3_CONN.get_bucket(settings.S3_PAGES_BUCKET_NAME))
k.key = self.feed.s3_pages_key
k.set_metadata('Content-Encoding', 'gzip')
k.set_metadata('Content-Type', 'text/html')
k.set_metadata('Access-Control-Allow-Origin', '*')
k.set_contents_from_string(compress_string_with_gzip(html.encode('utf-8')))
k.set_acl('public-read')
try:
feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
feed_page.delete()
logging.debug(' ---> [%-30s] ~FYTransfering page data to S3...' % (self.feed.log_title[:30]))
except MFeedPage.DoesNotExist:
pass
if not self.feed.s3_page:
self.feed.s3_page = True
self.feed.save()
return True
def delete_page_s3(self):
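        """Delete the feed's page from S3 and clear its s3_page flag."""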
k = Key(settings.S3_CONN.get_bucket(settings.S3_PAGES_BUCKET_NAME))
k.key = self.feed.s3_pages_key
k.delete()
self.feed.s3_page = False
self.feed.save()
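

# Minimal usage sketch (not part of this module): assumes an existing Feed instance
# `feed` and, for the story case, a story object `story` with a story_permalink;
# anything not defined in this file is an assumption.
#
#     from apps.rss_feeds.page_importer import PageImporter
#
#     PageImporter(feed).fetch_page()                  # fetch and store the feed's original page
#     PageImporter(feed, story=story).fetch_story()    # fetch one story's original page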