# NewsBlur-viq/apps/rss_feeds/page_importer.py

import datetime
import http.client
import re
import time
import traceback
import urllib.error
import urllib.parse
import urllib.request
import zlib
from socket import error as SocketError
import feedparser
import requests
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils.encoding import smart_bytes
from django.utils.text import compress_string as compress_string_with_gzip
from mongoengine.queryset import NotUniqueError
from OpenSSL.SSL import Error as OpenSSLError
from pyasn1.error import PyAsn1Error
from sentry_sdk import capture_exception, flush
from apps.rss_feeds.models import MFeedPage
from utils import log as logging
from utils.feed_functions import TimeoutError, timelimit
# from utils.feed_functions import mail_feed_error_to_admin
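# Feed links with these prefixes aren't fetchable web pages (non-http URI schemes or
# placeholder values), so page fetching is skipped for them.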
BROKEN_PAGES = [
"tag:",
"info:",
"uuid:",
"urn:",
"[]",
]
# Also change in reader_utils.js.
BROKEN_PAGE_URLS = [
"nytimes.com",
"github.com",
"washingtonpost.com",
"stackoverflow.com",
"stackexchange.com",
"twitter.com",
"rankexploits",
"gamespot.com",
"espn.com",
"royalroad.com",
]
class PageImporter(object):
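    """Fetches the original web page for a feed (or a single story's permalink),
    rewrites relative URLs so the archived HTML renders, and stores the result."""
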
def __init__(self, feed, story=None, request=None):
self.feed = feed
self.story = story
self.request = request
@property
def headers(self):
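        """Request headers identifying NewsBlur's page fetcher, the feed's subscriber count, and its permalink."""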
return {
"User-Agent": "NewsBlur Page Fetcher - %s subscriber%s - %s %s"
% (
self.feed.num_subscribers,
"s" if self.feed.num_subscribers != 1 else "",
self.feed.permalink,
self.feed.fake_user_agent,
),
}
def fetch_page(self, urllib_fallback=False, requests_exception=None):
try:
self.fetch_page_timeout(urllib_fallback=urllib_fallback, requests_exception=requests_exception)
except TimeoutError:
logging.user(
self.request,
" ***> [%-30s] ~FBPage fetch ~SN~FRfailed~FB due to timeout" % (self.feed.log_title[:30]),
)
@timelimit(10)
def fetch_page_timeout(self, urllib_fallback=False, requests_exception=None):
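        """Fetch the feed's page within the @timelimit window, falling back to urllib
        when requests fails and recording the outcome in the feed's page history."""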
html = None
feed_link = self.feed.feed_link
if not feed_link:
self.save_no_page(reason="No feed link")
return
if feed_link.startswith("www"):
self.feed.feed_link = "http://" + feed_link
try:
if any(feed_link.startswith(s) for s in BROKEN_PAGES):
self.save_no_page(reason="Broken page")
return
elif any(s in feed_link.lower() for s in BROKEN_PAGE_URLS):
self.save_no_page(reason="Banned")
return
elif feed_link.startswith("http"):
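                # urllib_fallback is set after a failed requests attempt; retry with urllib instead.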
if urllib_fallback:
request = urllib.request.Request(feed_link, headers=self.headers)
response = urllib.request.urlopen(request)
time.sleep(0.01) # Grrr, GIL.
data = response.read().decode(response.headers.get_content_charset() or "utf-8")
else:
try:
response = requests.get(feed_link, headers=self.headers, timeout=10)
response.connection.close()
except requests.exceptions.TooManyRedirects:
response = requests.get(feed_link, timeout=10)
except (
AttributeError,
SocketError,
OpenSSLError,
PyAsn1Error,
TypeError,
requests.adapters.ReadTimeout,
) as e:
logging.debug(
" ***> [%-30s] Page fetch failed using requests: %s"
% (self.feed.log_title[:30], e)
)
self.save_no_page(reason="Page fetch failed")
return
data = response.text
if response.encoding and response.encoding.lower() != "utf-8":
logging.debug(f" -> ~FBEncoding is {response.encoding}, re-encoding...")
try:
                            # Round-trip through the declared encoding to recover pages that are really UTF-8.
                            data = data.encode(response.encoding).decode("utf-8")
except (LookupError, UnicodeEncodeError):
logging.debug(f" -> ~FRRe-encoding failed!")
pass
else:
try:
data = open(feed_link, "r").read()
except IOError:
self.feed.feed_link = "http://" + feed_link
self.fetch_page(urllib_fallback=True)
return
if data:
html = self.rewrite_page(data)
if html:
self.save_page(html)
else:
self.save_no_page(reason="No HTML found")
return
else:
self.save_no_page(reason="No data found")
return
except (
ValueError,
urllib.error.URLError,
http.client.BadStatusLine,
http.client.InvalidURL,
requests.exceptions.ConnectionError,
) as e:
logging.debug(" ***> [%-30s] Page fetch failed: %s" % (self.feed.log_title[:30], e))
self.feed.save_page_history(401, "Bad URL", e)
try:
fp = feedparser.parse(self.feed.feed_address)
except (urllib.error.HTTPError, urllib.error.URLError) as e:
return html
feed_link = fp.feed.get("link", "")
self.feed.save()
except http.client.IncompleteRead as e:
logging.debug(" ***> [%-30s] Page fetch failed: %s" % (self.feed.log_title[:30], e))
self.feed.save_page_history(500, "IncompleteRead", e)
except (requests.exceptions.RequestException, requests.packages.urllib3.exceptions.HTTPError) as e:
logging.debug(
" ***> [%-30s] Page fetch failed using requests: %s" % (self.feed.log_title[:30], e)
)
# mail_feed_error_to_admin(self.feed, e, local_vars=locals())
return self.fetch_page(urllib_fallback=True, requests_exception=e)
except Exception as e:
logging.debug("[%d] ! -------------------------" % (self.feed.id,))
tb = traceback.format_exc()
logging.debug(tb)
logging.debug("[%d] ! -------------------------" % (self.feed.id,))
self.feed.save_page_history(500, "Error", tb)
# mail_feed_error_to_admin(self.feed, e, local_vars=locals())
if not settings.DEBUG and hasattr(settings, "SENTRY_DSN") and settings.SENTRY_DSN:
capture_exception(e)
flush()
if not urllib_fallback:
self.fetch_page(urllib_fallback=True)
else:
self.feed.save_page_history(200, "OK")
return html
def fetch_story(self):
html = None
try:
html = self._fetch_story()
except TimeoutError:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal story~FY: timed out")
except requests.exceptions.TooManyRedirects:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal story~FY: too many redirects")
return html
@timelimit(10)
def _fetch_story(self):
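        """Fetch the story's permalink, rewrite its HTML, and save it on the story."""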
html = None
story_permalink = self.story.story_permalink
if not self.feed:
return
if any(story_permalink.startswith(s) for s in BROKEN_PAGES):
return
if any(s in story_permalink.lower() for s in BROKEN_PAGE_URLS):
return
if not story_permalink.startswith("http"):
return
try:
response = requests.get(story_permalink, headers=self.headers, timeout=10)
response.connection.close()
except (
AttributeError,
SocketError,
OpenSSLError,
PyAsn1Error,
requests.exceptions.ConnectionError,
requests.exceptions.TooManyRedirects,
requests.adapters.ReadTimeout,
) as e:
try:
response = requests.get(story_permalink, timeout=10)
except (
AttributeError,
SocketError,
OpenSSLError,
PyAsn1Error,
requests.exceptions.ConnectionError,
requests.exceptions.TooManyRedirects,
requests.adapters.ReadTimeout,
) as e:
logging.debug(
" ***> [%-30s] Original story fetch failed using requests: %s"
% (self.feed.log_title[:30], e)
)
return
# try:
data = response.text
# except (LookupError, TypeError):
# data = response.content
# import pdb; pdb.set_trace()
if response.encoding and response.encoding.lower() != "utf-8":
logging.debug(f" -> ~FBEncoding is {response.encoding}, re-encoding...")
try:
                    # Round-trip through the declared encoding to recover pages that are really UTF-8.
                    data = data.encode(response.encoding).decode("utf-8")
except (LookupError, UnicodeEncodeError):
logging.debug(f" -> ~FRRe-encoding failed!")
pass
if data:
            data = data.replace("\xc2\xa0", " ")  # Non-breaking space gets mangled when the encoding isn't utf-8
            data = data.replace("\\u00a0", " ")  # Non-breaking space gets mangled when the encoding isn't utf-8
html = self.rewrite_page(data)
if not html:
return
self.save_story(html)
return html
def save_story(self, html):
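        """Compress the rewritten page HTML and attach it to the story document."""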
self.story.original_page_z = zlib.compress(smart_bytes(html))
try:
self.story.save()
except NotUniqueError:
pass
def save_no_page(self, reason=None):
logging.debug(
" ---> [%-30s] ~FYNo original page: %s / %s"
% (self.feed.log_title[:30], reason, self.feed.feed_link)
)
self.feed.has_page = False
self.feed.save()
self.feed.save_page_history(404, f"Feed has no original page: {reason}")
def rewrite_page(self, response):
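        """Inject a <base href> pointing at the feed link so relative URLs in the saved page resolve."""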
BASE_RE = re.compile(r"<head(.*?)>", re.I)
base_code = '<base href="%s" />' % (self.feed.feed_link,)
        html = BASE_RE.sub(r"<head\1> " + base_code, response)
if "<base href" not in html:
html = "%s %s" % (base_code, html)
# html = self.fix_urls(html)
return html.strip()
def fix_urls(self, document):
# BEWARE: This will rewrite URLs inside of <script> tags. You know, like
# Google Analytics. Ugh.
FIND_RE = re.compile(r'\b(href|src)\s*=\s*("[^"]*"|\'[^\']*\'|[^"\'<>=\s]+)')
ret = []
last_end = 0
for match in FIND_RE.finditer(document):
url = match.group(2)
if url[0] in "\"'":
url = url.strip(url[0])
parsed = urllib.parse.urlparse(url)
if parsed.scheme == parsed.netloc == "": # relative to domain
url = urllib.parse.urljoin(self.feed.feed_link, url)
ret.append(document[last_end : match.start(2)])
ret.append('"%s"' % (url,))
last_end = match.end(2)
ret.append(document[last_end:])
return "".join(ret)
def save_page(self, html):
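        """Persist the page, preferring the node page server, then S3, then a MongoDB MFeedPage."""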
saved = False
if not html or len(html) < 100:
return
if settings.BACKED_BY_AWS.get("pages_on_node"):
saved = self.save_page_node(html)
if saved and self.feed.s3_page and settings.BACKED_BY_AWS.get("pages_on_s3"):
self.delete_page_s3()
if settings.BACKED_BY_AWS.get("pages_on_s3") and not saved:
saved = self.save_page_s3(html)
if not saved:
try:
feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
# feed_page.page_data = html.encode('utf-8')
if feed_page.page() == html:
logging.debug(
" ---> [%-30s] ~FYNo change in page data: %s"
% (self.feed.log_title[:30], self.feed.feed_link)
)
else:
# logging.debug(' ---> [%-30s] ~FYChange in page data: %s (%s/%s %s/%s)' % (self.feed.log_title[:30], self.feed.feed_link, type(html), type(feed_page.page()), len(html), len(feed_page.page())))
feed_page.page_data = zlib.compress(smart_bytes(html))
feed_page.save()
except MFeedPage.DoesNotExist:
feed_page = MFeedPage.objects.create(
feed_id=self.feed.pk, page_data=zlib.compress(smart_bytes(html))
)
return feed_page
def save_page_node(self, html):
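        """POST the zlib-compressed page to the original-page node service; returns True on HTTP 200."""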
domain = "node-page.service.consul:8008"
if settings.DOCKERBUILD:
domain = "node:8008"
url = "http://%s/original_page/%s" % (
domain,
self.feed.pk,
)
compressed_html = zlib.compress(smart_bytes(html))
response = requests.post(
url,
files={
"original_page": compressed_html,
# 'original_page': html,
},
)
if response.status_code == 200:
return True
else:
logging.debug(
" ---> [%-30s] ~FRFailed to save page to node: %s (%s bytes)"
% (self.feed.log_title[:30], response.status_code, len(compressed_html))
)
def save_page_s3(self, html):
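        """Upload the gzipped page to S3 and remove any MongoDB copy it supersedes."""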
        # Far-future cache expiry for the S3 copy; the one-year value here is an assumption.
        expires = (datetime.datetime.now() + datetime.timedelta(days=365)).strftime("%a, %d %b %Y %H:%M:%S GMT")
        s3_object = settings.S3_CONN.Object(settings.S3_PAGES_BUCKET_NAME, self.feed.s3_pages_key)
s3_object.put(
Body=compress_string_with_gzip(html.encode("utf-8")),
ContentType="text/html",
ContentEncoding="gzip",
Expires=expires,
ACL="public-read",
)
try:
feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
feed_page.delete()
            logging.debug(" ---> [%-30s] ~FYTransferring page data to S3..." % (self.feed.log_title[:30]))
except MFeedPage.DoesNotExist:
pass
if not self.feed.s3_page:
self.feed.s3_page = True
self.feed.save()
return True
def delete_page_s3(self):
k = settings.S3_CONN.Bucket(settings.S3_PAGES_BUCKET_NAME).Object(key=self.feed.s3_pages_key)
k.delete()
self.feed.s3_page = False
self.feed.save()
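
# A minimal usage sketch, assuming a Feed document (and optionally an MStory) is already
# loaded; the variable names below are illustrative, not part of this module:
#
#     importer = PageImporter(feed)
#     importer.fetch_page()                            # fetch, rewrite, and persist the feed's page
#     PageImporter(feed, story=story).fetch_story()    # fetch and store a single story's original page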