
__license__   = 'GPL v3'
__copyright__ = '2011, Starson17 <Starson17 at gmail.com>'
'''
www.wired.co.uk
'''

from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe
import re

class Wired_UK(BasicNewsRecipe):
    title                 = 'Wired Magazine - UK edition'
    __author__            = 'Starson17'
    __version__           = 'v1.30'
    __date__              = '15 July 2011'
    description           = 'Technology, science, business and culture news from Wired UK'
    publisher             = 'Conde Nast Digital'
    category              = 'news, games, IT, gadgets'
    oldest_article        = 40
    max_articles_per_feed = 100
    no_stylesheets        = True
    encoding              = 'utf-8'
    use_embedded_content  = False
    #masthead_url          = 'http://www.wired.co.uk/_/media/wired-logo_UK.gif'
    language              = 'en_GB'
    index                 = 'http://www.wired.co.uk'

    conversion_options = {
                          'comment'   : description
                        , 'tags'      : category
                        , 'publisher' : publisher
                        , 'language'  : language
                        }

    keep_only_tags = [dict(name='div', attrs={'class':['layoutColumn1']})]
    remove_tags = [dict(name='div',attrs={'class':['articleSidebar1','commentAddBox linkit','commentCountBox commentCountBoxBig']})]
    remove_tags_after = dict(name='div',attrs={'class':['mainCopy entry-content','mainCopy']})
    # Disabled alternatives, kept for reference:
    #remove_attributes = ['height','width']
    #remove_tags += [dict(name=['object','embed','iframe','link'])
    #               ,dict(attrs={'class':['opts','comment','stories']})
    #               ]
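    # parse_index builds the feed lists by hand: the homepage supplies the
    # 'Latest News' and 'Features' feeds, and the /magazine index page supplies
    # the 'More' and 'Play' feeds. '?page=all' is appended to each link to
    # request the single-page version of the article.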
    def parse_index(self):
        totalfeeds = []
        soup   = self.index_to_soup(self.index)
        recentcontent = soup.find('ul',attrs={'class':'linkList3'})
        mfeed = []
        if recentcontent:
          for li in recentcontent.findAll('li'):
            a = li.h2.a
            url  = self.index + a['href'] + '?page=all'
            title = self.tag_to_string(a)
            description = ''
            date  = strftime(self.timefmt)
            mfeed.append({
                  'title'      :title
                 ,'date'       :date
                 ,'url'        :url
                 ,'description':description
                })
        totalfeeds.append(('Wired UK Magazine Latest News', mfeed))
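        # The 'Features' feed is read from the second 'sidebarLinkList' block on
        # the homepage; guard against the block or its heading link being absent.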
        popmagcontent = soup.findAll('div',attrs={'class':'sidebarLinkList'})
        magcontent = popmagcontent[1] if len(popmagcontent) > 1 else None
        mfeed2 = []
        if magcontent:
          a = magcontent.h3.a if magcontent.h3 else None
          if a:
            url   = self.index + a['href'] + '?page=all'
            title = self.tag_to_string(a)
            description = ''
            date  = strftime(self.timefmt)
            mfeed2.append({
                  'title'      :title
                 ,'date'       :date
                 ,'url'        :url
                 ,'description':description
                })
          for li in magcontent.findAll('li'):
            a = li.a
            url   = self.index + a['href'] + '?page=all'
            title = self.tag_to_string(a)
            description = ''
            date  = strftime(self.timefmt)
            mfeed2.append({
                  'title'      :title
                 ,'date'       :date
                 ,'url'        :url
                 ,'description':description
                })
          totalfeeds.append(('Wired UK Magazine Features', mfeed2))

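        # The /magazine index page lists the 'Start' (titleStart) and 'Play'
        # (titlePlay) sections; article links are read from each heading's parent.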
        magsoup = self.index_to_soup(self.index + '/magazine')
        starttitle = magsoup.find('h3',attrs={'class':'magSubSectionTitle titleStart'})
        startcontent = starttitle.parent if starttitle else None
        mfeed3 = []
        if startcontent:
          for li in startcontent.findAll('li'):
            a = li.a
            url   = self.index + a['href'] + '?page=all'
            title = self.tag_to_string(a)
            description = ''
            date  = strftime(self.timefmt)
            mfeed3.append({
                  'title'      :title
                 ,'date'       :date
                 ,'url'        :url
                 ,'description':description
                })
          totalfeeds.append(('Wired UK Magazine More', mfeed3))

        playtitle = magsoup.find('h3',attrs={'class':'magSubSectionTitle titlePlay'})
        playcontent = playtitle.parent if playtitle else None
        mfeed4 = []
        if playcontent:
          for li in playcontent.findAll('li'):
            a = li.a
            url   = self.index + a['href'] + '?page=all'
            title = self.tag_to_string(a)
            description = ''
            date  = strftime(self.timefmt)
            mfeed4.append({
                  'title'      :title
                 ,'date'       :date
                 ,'url'        :url
                 ,'description':description
                })
          totalfeeds.append(('Wired UK Magazine Play', mfeed4))
        return totalfeeds

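    # The cover URL is taken from the first 'image linkme' block on the
    # magazine archive page.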
    def get_cover_url(self):
        cover_url = ''
        soup = self.index_to_soup(self.index + '/magazine/archive')
        cover_item = soup.find('div', attrs={'class':'image linkme'})
        if cover_item:
           cover_url = cover_item.img['src']
        return cover_url

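    # Drop the "This article was taken from..." promo paragraphs that appear
    # at the top of magazine articles.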
    def preprocess_html(self, soup):
        for tag in soup.findAll(name='p'):
            if tag.find(name='span', text=re.compile(r'This article was taken from.*', re.DOTALL|re.IGNORECASE)):
                tag.extract()
        return soup

    extra_css = '''
                    h1{font-family:Arial,Helvetica,sans-serif; font-weight:bold;font-size:large;}
                    h2{font-family:Arial,Helvetica,sans-serif; font-weight:normal;font-size:small;}
                    p{font-family:Arial,Helvetica,sans-serif;font-size:small;}
                    body{font-family:Helvetica,Arial,sans-serif;font-size:small;}
        '''