import socket
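# Give all socket operations (including the urllib2 fetches below) a short global timeout.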
socket.setdefaulttimeout(2)
import urllib2
import re
import urlparse
import multiprocessing
import traceback
from apps.rss_feeds.models import FeedPage

class PageImporter(object):
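    """Fetches a feed's story page, injects a <base> tag so relative links
    resolve against the feed's site, and stores the HTML on the feed's
    FeedPage record. Fetch results are logged via feed.save_page_history()."""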

    def __init__(self, url, feed):
        self.url = url
        self.feed = feed
        self.lock = multiprocessing.Lock()

    def fetch_page(self):
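        """Download self.url, rewrite the HTML, and save it. HTTP errors and
        any other exceptions are recorded in the feed's page fetch history."""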
        if not self.url:
            return

        try:
            request = urllib2.Request(self.url)
            response = urllib2.urlopen(request)
            data = response.read()
            html = self.rewrite_page(data)
            self.save_page(html)
        except urllib2.HTTPError, e:
            print "HTTP Error: %s" % e
            self.feed.save_page_history(e.code, e.msg, e.fp.read())
            return
        except Exception, e:
            print '[%d] ! -------------------------' % (self.feed.id,)
            tb = traceback.format_exc()
            print tb
            print '[%d] ! -------------------------' % (self.feed.id,)
            self.feed.save_page_history(500, "Error", tb)
            return

        self.feed.save_page_history(200, "OK")

    def rewrite_page(self, response):
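        """Insert a <base href="..."> tag for the feed's link into the page's
        <head>, so relative URLs in the stored copy resolve against the
        original site. If the substitution fails, re-encode the page as UTF-8
        and retry."""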
        BASE_RE = re.compile(r'<head(.*?\>)', re.I)
        base_code = u'<base href="%s" />' % (self.feed.feed_link,)
        try:
            html = BASE_RE.sub(r'<head\1 '+base_code, response)
        except:
            response = response.decode('latin1').encode('utf-8')
            html = BASE_RE.sub(r'<head\1 '+base_code, response)

        # html = self.fix_urls(html)

        return html.strip()

    def fix_urls(self, document):
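        """Rewrite relative href/src attribute values in `document` into
        absolute URLs based on the feed's link. Currently unused;
        rewrite_page injects a <base> tag instead."""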
        # BEWARE: This will rewrite URLs inside of <script> tags. You know, like
        # Google Analytics. Ugh.

        FIND_RE = re.compile(r'\b(href|src)\s*=\s*("[^"]*"|\'[^\']*\'|[^"\'<>=\s]+)')
        ret = []
        last_end = 0

        for match in FIND_RE.finditer(document):
            url = match.group(2)
            if url[0] in "\"'":
                url = url.strip(url[0])
            parsed = urlparse.urlparse(url)
            if parsed.scheme == parsed.netloc == '': # relative to domain
                url = urlparse.urljoin(self.feed.feed_link, url)
            ret.append(document[last_end:match.start(2)])
            ret.append('"%s"' % (url,))
            last_end = match.end(2)
        ret.append(document[last_end:])

        return ''.join(ret)

    def save_page(self, html):
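        """Persist the rewritten HTML on the feed's FeedPage record, ignoring
        responses too short to be a real page."""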
        if html and len(html) > 100:
            feed_page, _ = FeedPage.objects.get_or_create(feed=self.feed)
            feed_page.page_data = html
            feed_page.save()
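
# Example usage -- a minimal sketch, assuming a Feed model in
# apps.rss_feeds.models with the `feed_link`, `id`, and `save_page_history()`
# attributes used above (not defined in this module):
#
#   from apps.rss_feeds.models import Feed
#   feed = Feed.objects.get(pk=1)
#   PageImporter(feed.feed_link, feed).fetch_page()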