__license__   = 'GPL v3'
__copyright__ = '2008-2011, Darko Miletic <darko.miletic at gmail.com>'
'''
mondediplo.com
'''

import urllib
from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe

class LeMondeDiplomatiqueEn(BasicNewsRecipe):
    title                  = 'Le Monde diplomatique - English edition'
    __author__             = 'Darko Miletic'
    description            = "Le Monde diplomatique is the place you go when you want to know what's really happening. This is a major international paper that is truly independent, that sees the world in fresh ways, that focuses on places no other publications reach. We offer a clear, considered view of the conflicting interests and complexities of a modern global world. LMD in English is a concise version of the Paris-based parent edition, publishing all the major stories each month, expertly translated, and with some London-based commissions too. We offer a taster of LMD quality on our website where a selection of articles are available each month."
    publisher              = 'Le Monde diplomatique'
    category               = 'news, politics, world'
    no_stylesheets         = True
    oldest_article         = 31
    delay                  = 1
    encoding               = 'utf-8'
    needs_subscription     = True
    masthead_url           = 'http://mondediplo.com/squelettes/pics/logo-30.gif'
    publication_type       = 'magazine'
    PREFIX                 = 'http://mondediplo.com/'
    LOGIN                  = PREFIX + '2009/09/02congo'
    INDEX                  = PREFIX + strftime('%Y/%m/')
    use_embedded_content   = False
    language               = 'en'
    extra_css              = """
                                body{font-family: "Luxi sans","Lucida sans","Lucida Grande",Lucida,"Lucida Sans Unicode",sans-serif}
                                .surtitre{font-size: 1.2em; font-variant: small-caps; margin-bottom: 0.5em}
                                .chapo{font-size: 1.2em; font-weight: bold; margin: 1em 0 0.5em}
                                .texte{font-family: Georgia,"Times New Roman",serif} h1{color: #990000}
                                .notes{border-top: 1px solid #CCCCCC; font-size: 0.9em; line-height: 1.4em}
                            """

    conversion_options = {
                          'comment'   : description
                        , 'tags'      : category
                        , 'publisher' : publisher
                        , 'language'  : language
                        }

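    # Authentication: the LOGIN page is opened first, then, if calibre was given
    # credentials, they are POSTed as the site's form fields 'login', 'pass' and 'enter'.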
    def get_browser(self):
        br = BasicNewsRecipe.get_browser()
        br.open(self.LOGIN)
        if self.username is not None and self.password is not None:
            data = urllib.urlencode({ 'login':self.username
                                     ,'pass':self.password
                                     ,'enter':'enter'
                                   })
            br.open(self.LOGIN,data)
        return br

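    # Keep only the article body ('contenu') and the footnotes block; scripts,
    # embedded objects and purely presentational attributes are stripped.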
    keep_only_tags    =[
                          dict(name='div', attrs={'id':'contenu'})
                        , dict(name='div',attrs={'class':'notes surlignable'})
                        ]
    remove_tags = [dict(name=['object','link','script','iframe','base'])]
    remove_attributes = ['height','width','name','lang']

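    # Build the issue's table of contents from the current month's index page
    # (INDEX expands to http://mondediplo.com/YYYY/MM/): each <li> inside the
    # 'som_num' block becomes one article, with its 'chapo' div as the description.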
    def parse_index(self):
        articles = []
        soup = self.index_to_soup(self.INDEX)
        cnt = soup.find('div',attrs={'class':'som_num'})
        for item in cnt.findAll('li'):
            description = ''
            feed_link = item.find('a')
            desc = item.find('div',attrs={'class':'chapo'})
            if desc:
               description = desc.string
            if feed_link and feed_link.has_key('href'):
                url   = self.PREFIX + feed_link['href'].partition('/../')[2]
                title = self.tag_to_string(feed_link)
                date  = strftime(self.timefmt)
                articles.append({
                                  'title'      :title
                                 ,'date'       :date
                                 ,'url'        :url
                                 ,'description':description
                                })
        return [(self.title, articles)]

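    # The cover is the 'spip_logos' image inside the 'current' div on the index
    # page; its relative src is resolved against INDEX.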
    def get_cover_url(self):
        cover_url = None
        soup = self.index_to_soup(self.INDEX)
        cover_item = soup.find('div',attrs={'class':'current'})
        if cover_item:
           ap = cover_item.find('img',attrs={'class':'spip_logos'})
           if ap:
              cover_url = self.INDEX + ap['src']
        return cover_url

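    # Cleanup: drop inline styles and flatten every <a> tag to its plain text.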
    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll('a'):
            if item.string is not None:
                item.replaceWith(item.string)
            else:
                item.replaceWith(self.tag_to_string(item))
        return soup