Chip 2011 November / CHIP_2011_11.iso / Programy / Narzedzia / Calibre / calibre-0.8.18.msi / file_280 / saskatoon_star_phoenix.recipe
Text File  |  2011-09-09  |  5KB  |  112 lines

#!/usr/bin/env python

__license__ = 'GPL v3'

'''
www.canada.com
'''

from calibre.web.feeds.recipes import BasicNewsRecipe


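# All of the CanWest dailies handled below share the same canada.com article
# markup, so a single BasicNewsRecipe subclass covers them; only the title,
# url_prefix and description change from paper to paper.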
class CanWestPaper(BasicNewsRecipe):

    # the following three lines are active for the Saskatoon Star-Phoenix
    title = u'Saskatoon Star-Phoenix'
    url_prefix = 'http://www.thestarphoenix.com'
    description = u'News from Saskatoon, SK'

    # un-comment the following three lines for the Windsor Star
    #title = u'Windsor Star'
    #url_prefix = 'http://www.windsorstar.com'
    #description = u'News from Windsor, ON'

    # un-comment the following three lines for the Ottawa Citizen
    #title = u'Ottawa Citizen'
    #url_prefix = 'http://www.ottawacitizen.com'
    #description = u'News from Ottawa, ON'

    # un-comment the following three lines for the Montreal Gazette
    #title = u'Montreal Gazette'
    #url_prefix = 'http://www.montrealgazette.com'
    #description = u'News from Montreal, QC'
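
    # Only one title/url_prefix/description trio above should be active at a
    # time; the rest of the recipe works unchanged for any of these papers.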


    language = 'en_CA'
    __author__ = 'Nick Redding'
    no_stylesheets = True
    timefmt = ' [%b %d]'
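    # extra_css is added to every downloaded article; these rules size the
    # headline, byline, timestamp and photo-caption elements used on the site.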
    extra_css = '''
                .timestamp { font-size: xx-small; display: block; }
                #storyheader { font-size: medium; }
                #storyheader h1 { font-size: x-large; }
                #storyheader h2 { font-size: large; font-style: italic; }
                .byline { font-size: xx-small; }
                #photocaption { font-size: small; font-style: italic }
                #photocredit { font-size: xx-small; }'''
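    # keep_only_tags narrows each article page down to the story header and body
    # divs; remove_tags then prunes the sharing, navigation and copyright chrome
    # that appears inside them.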
    keep_only_tags = [dict(name='div', attrs={'id':'storyheader'}),
                      dict(name='div', attrs={'id':'storycontent'})]
    remove_tags = [{'class':'comments'},
                   dict(name='div', attrs={'class':'navbar'}), dict(name='div', attrs={'class':'morelinks'}),
                   dict(name='div', attrs={'class':'viewmore'}), dict(name='li', attrs={'class':'email'}),
                   dict(name='div', attrs={'class':'story_tool_hr'}), dict(name='div', attrs={'class':'clear'}),
                   dict(name='div', attrs={'class':'story_tool'}), dict(name='div', attrs={'class':'copyright'}),
                   dict(name='div', attrs={'class':'rule_grey_solid'}),
                   dict(name='li', attrs={'class':'print'}), dict(name='li', attrs={'class':'share'}),
                   dict(name='ul', attrs={'class':'bullet'})]

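    # preprocess_html() is called by calibre with the parsed soup of each
    # downloaded article before conversion, so the fix below runs on every story.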
    def preprocess_html(self, soup):
        # delete empty id attributes -- they screw up the TOC for unknown reasons
        divtags = soup.findAll('div', attrs={'id': ''})
        if divtags:
            for div in divtags:
                del div['id']
        return soup


    def parse_index(self):
        soup = self.index_to_soup(self.url_prefix + '/news/todays-paper/index.html')

        articles = {}
        key = 'News'
        ans = ['News']

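        # articles maps a section title to its list of article dicts, while ans
        # records the order in which sections appear on the page.  'News' is the
        # default section for anything found before the first section title.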
        # Find each div with class "section_title02" (a section heading) or
        # "featurecontent" (an article entry)
        for divtag in soup.findAll('div', attrs={'class': ["section_title02", "featurecontent"]}):
            #self.log(" div class = %s" % divtag['class'])
            if divtag['class'].startswith('section_title'):
                # div contains a section title
                if not divtag.h3:
                    continue
                key = self.tag_to_string(divtag.h3, False)
                ans.append(key)
                self.log("Section name %s" % key)
                continue
            # div contains article data
            h1tag = divtag.find('h1')
            if not h1tag:
                continue
            atag = h1tag.find('a', href=True)
            if not atag:
                continue
            url = self.url_prefix + '/news/todays-paper/' + atag['href']
            #self.log("Section %s" % key)
            #self.log("url %s" % url)
            title = self.tag_to_string(atag, False)
            #self.log("title %s" % title)
            pubdate = ''
            description = ''
            ptag = divtag.find('p')
            if ptag:
                description = self.tag_to_string(ptag, False)
                #self.log("description %s" % description)
            author = ''
            autag = divtag.find('h4')
            if autag:
                author = self.tag_to_string(autag, False)
                #self.log("author %s" % author)
            if key not in articles:
                articles[key] = []
            articles[key].append(dict(title=title, url=url, date=pubdate,
                                      description=description, author=author, content=''))

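        # calibre expects parse_index() to return a list of
        # (section title, list of article dicts) pairs, e.g.
        #   [('News', [{'title': ..., 'url': ..., 'date': ..., 'description': ..., 'content': ...}, ...]), ...]
        # Rebuilding ans here keeps the section order and drops any section that
        # ended up with no articles.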
        ans = [(key, articles[key]) for key in ans if key in articles]
        return ans