#!/usr/bin/env python

__license__   = 'GPL v3'
__copyright__ = '2011, Davide Cavalca <davide125 at tiscali.it>'
'''
lwn.net
'''

from calibre.web.feeds.news import BasicNewsRecipe
import re

class WeeklyLWN(BasicNewsRecipe):
    title = 'LWN.net Weekly Edition'
    description = 'Weekly summary of what has happened in the free software world.'
    __author__ = 'Davide Cavalca'
    language = 'en'

    cover_url = 'http://lwn.net/images/lcorner.png'
    #masthead_url = 'http://lwn.net/images/lcorner.png'
    publication_type = 'magazine'

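    # Keep only the article body: trim everything before the headline and
    # after the article text, and drop <h2> headings and the comment form.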
    remove_tags_before = dict(attrs={'class':'PageHeadline'})
    remove_tags_after = dict(attrs={'class':'ArticleText'})
    remove_tags = [dict(name=['h2', 'form'])]

    preprocess_regexps = [
        # Remove the <hr> and "Log in to post comments"
        (re.compile(r'<hr.*?comments[)]', re.DOTALL), lambda m: ''),
    ]

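    # LWN's printable pages lay content out with tables; linearizing them
    # keeps the text readable as a single column.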
    conversion_options = {'linearize_tables': True}

    oldest_article = 7.0
    needs_subscription = 'optional'

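    # Log in when credentials are given, so parse_index() below can fetch
    # the subscriber-only current edition.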
    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        if self.username is not None and self.password is not None:
            br.open('https://lwn.net/login')
            br.select_form(name='loginform')
            br['Username'] = self.username
            br['Password'] = self.password
            br.submit()
        return br

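    # Build the table of contents by walking the one-page ("big page")
    # edition: subscribers get the current issue, everyone else the free one.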
    def parse_index(self):
        if self.username is not None and self.password is not None:
            index_url = 'http://lwn.net/current/bigpage?format=printable'
        else:
            index_url = 'http://lwn.net/free/bigpage?format=printable'
        soup = self.index_to_soup(index_url)
        body = soup.body

        articles = {}
        ans = []
        url_re = re.compile('^/Articles/')

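        # Each article title is a <p class="SummaryHL">; its section comes
        # from the nearest preceding Cat1HL (and, if nested, Cat2HL) heading.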
        while True:
            tag_title = body.findNext(name='p', attrs={'class':'SummaryHL'})
            if tag_title is None:
                break

            tag_section = tag_title.findPrevious(name='p', attrs={'class':'Cat1HL'})
            if tag_section is None:
                section = 'Front Page'
            else:
                section = tag_section.string

            tag_section2 = tag_title.findPrevious(name='p', attrs={'class':'Cat2HL'})
            if tag_section2 is not None:
                if tag_section2.findPrevious(name='p', attrs={'class':'Cat1HL'}) == tag_section:
                    section = '%s: %s' % (section, tag_section2.string)

            if section not in articles:
                articles[section] = []
            if section not in ans:
                ans.append(section)

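            # Scan forward from the title for the link that identifies the
            # article: "Full Story" on summaries, or "Comments (n)" when the
            # full text already appears on the big page.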
            body = tag_title
            while True:
                tag_url = body.findNext(name='a', attrs={'href':url_re})
                if tag_url is None:
                    break
                body = tag_url
                if tag_url.string is None:
                    continue
                elif tag_url.string == 'Full Story':
                    break
                elif tag_url.string.startswith('Comments ('):
                    break
                else:
                    continue

            if tag_url is None:
                break

            article = dict(
                title=tag_title.string,
                url='http://lwn.net' + tag_url['href'].split('#')[0] + '?format=printable',
                description='', content='', date='')
            articles[section].append(article)

        ans = [(key, articles[key]) for key in ans if key in articles]
        if not ans:
            raise Exception('Could not find any articles.')

        return ans

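# A minimal usage sketch (assuming a standard calibre install; the output
# name and credentials below are placeholders):
#
#   ebook-convert lwn_weekly.recipe lwn.epub --username USER --password PASS
#
# --username/--password are only needed for the subscriber-only current
# edition; without them the recipe falls back to the free big page.
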
# vim: expandtab:ts=4:sw=4