import socket
socket.setdefaulttimeout(15)
import urllib2
import re
import urlparse
import multiprocessing
import traceback
import feedparser
from utils import log as logging
from apps.rss_feeds.models import MFeedPage


class PageImporter(object):

    def __init__(self, url, feed):
        self.url = url
        self.feed = feed
        self.lock = multiprocessing.Lock()

    def fetch_page(self):
        if not self.url:
            return

        try:
            request = urllib2.Request(self.url)
            response = urllib2.urlopen(request)
            data = response.read()
            html = self.rewrite_page(data)
            self.save_page(html)
        except ValueError, e:
            # A malformed URL raises ValueError: record the failure, then try
            # to recover a usable link from the feed document itself.
            self.feed.save_page_history(401, "Bad URL", e)
            fp = feedparser.parse(self.feed.feed_address)
            self.feed.feed_link = fp.feed.get('link', "")
            self.feed.save()
        except urllib2.HTTPError, e:
            self.feed.save_page_history(e.code, e.msg, e.fp.read())
            return
        except Exception, e:
            logging.debug('[%d] ! -------------------------' % (self.feed.id,))
            tb = traceback.format_exc()
            logging.debug(tb)
            logging.debug('[%d] ! -------------------------' % (self.feed.id,))
            self.feed.save_page_history(500, "Error", tb)
            return

        self.feed.save_page_history(200, "OK")
    def rewrite_page(self, response):
        # Inject a <base href> into the page's <head> so that relative links
        # and images in the stored copy resolve against the feed's own site.
        BASE_RE = re.compile(r'<head(.*?\>)', re.I)
        base_code = u'<base href="%s" />' % (self.feed.feed_link,)
        html = BASE_RE.sub(r'<head\1 ' + base_code, response)
        return html
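    def save_page(self, html):
        # fetch_page() calls save_page() to persist the rewritten HTML, but
        # no definition appears above. This is a minimal sketch, assuming
        # MFeedPage is a MongoEngine document with hypothetical `feed_id` and
        # `page_data` fields; the real model's fields and persistence logic
        # may differ.
        if not html or len(html) < 100:
            # Skip empty or near-empty responses rather than storing them.
            return
        saved = MFeedPage.objects(feed_id=self.feed.pk).update_one(
            set__page_data=html)
        if not saved:
            # No existing page document for this feed yet: create one
            # (assumed fields, as noted above).
            MFeedPage.objects.create(feed_id=self.feed.pk, page_data=html)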