__license__ = 'GPL v3'
__copyright__ = '2010, Darko Miletic <darko.miletic at gmail.com>'
'''
www.wired.com
'''

import re

from calibre import strftime
from calibre.web.feeds.news import BasicNewsRecipe

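# Downloads the monthly magazine edition from www.wired.com/magazine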
class Wired(BasicNewsRecipe):
    title = 'Wired Magazine'
    __author__ = 'Darko Miletic'
    description = 'Technology news'
    publisher = 'Conde Nast Digital'
    category = 'news, games, IT, gadgets'
    oldest_article = 32
    delay = 1
    max_articles_per_feed = 100
    no_stylesheets = True
    encoding = 'utf-8'
    use_embedded_content = False
    masthead_url = 'http://www.wired.com/images/home/wired_logo.gif'
    language = 'en'
    publication_type = 'magazine'
    extra_css = ' body{font-family: Arial,Verdana,sans-serif} .entryDescription li {display: inline; list-style-type: none} '
    index = 'http://www.wired.com/magazine/'

    preprocess_regexps = [(re.compile(r'<meta name="Title".*<title>', re.DOTALL | re.IGNORECASE), lambda match: '<title>')]

    conversion_options = {
        'comment': description,
        'tags': category,
        'publisher': publisher,
        'language': language,
    }

    keep_only_tags = [dict(name='div', attrs={'class': 'post'})]
    remove_tags_after = dict(name='div', attrs={'class': 'tweetmeme_button'})
    remove_tags = [
        dict(name=['object', 'embed', 'iframe', 'link', 'meta', 'base']),
        dict(name='div', attrs={'class': ['podcast_storyboard', 'tweetmeme_button']}),
        dict(attrs={'id': 'ff_bottom_nav'}),
        dict(name='a', attrs={'href': 'http://www.wired.com/app'}),
    ]
    remove_attributes = ['height', 'width', 'lang', 'border', 'clear']

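    # Build the feed list by hand from the magazine index page instead of relying on RSS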
    def parse_index(self):
        totalfeeds = []
        soup = self.index_to_soup(self.index)
        # Cover stories listed in the index block
        majorf = soup.find('div', attrs={'class': 'index'})
        if majorf:
            pfarticles = []
            firsta = majorf.find(attrs={'class': 'spread-header'})
            if firsta:
                pfarticles.append({
                    'title': self.tag_to_string(firsta.a),
                    'date': strftime(self.timefmt),
                    'url': 'http://www.wired.com' + firsta.a['href'],
                    'description': '',
                })
            for itt in majorf.findAll('li'):
                itema = itt.find('a', href=True)
                if itema:
                    pfarticles.append({
                        'title': self.tag_to_string(itema),
                        'date': strftime(self.timefmt),
                        'url': 'http://www.wired.com' + itema['href'],
                        'description': '',
                    })
            totalfeeds.append(('Cover', pfarticles))
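        # Featured articles from the 'my-glider' carousel on the index page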
        features = soup.find('div', attrs={'id': 'my-glider'})
        if features:
            farticles = []
            for item in features.findAll('div', attrs={'class': 'section'}):
                divurl = item.find('div', attrs={'class': 'feature-header'})
                if divurl:
                    divdesc = item.find('div', attrs={'class': 'feature-text'})
                    url = divurl.a['href']
                    if not url.startswith('http://www.wired.com'):
                        url = 'http://www.wired.com' + url
                    title = self.tag_to_string(divurl.a)
                    description = self.tag_to_string(divdesc)
                    date = strftime(self.timefmt)
                    farticles.append({
                        'title': title,
                        'date': date,
                        'url': url,
                        'description': description,
                    })
            totalfeeds.append(('Featured Articles', farticles))
        # Department feeds (Rants, Start, Test, Play, Found)
        departments = ['rants', 'start', 'test', 'play', 'found']
        dept = soup.find('div', attrs={'id': 'magazine-departments'})
        if dept:
            for ditem in departments:
                darticles = []
                department = dept.find('div', attrs={'id': 'department-' + ditem})
                if department:
                    for item in department.findAll('div'):
                        description = ''
                        feed_link = item.find('a')
                        if feed_link and feed_link.has_attr('href'):
                            url = feed_link['href']
                            title = self.tag_to_string(feed_link)
                            date = strftime(self.timefmt)
                            darticles.append({
                                'title': title,
                                'date': date,
                                'url': url,
                                'description': description,
                            })
                totalfeeds.append((ditem.capitalize(), darticles))
        return totalfeeds

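    # Grab the cover image from the 'spread-image' block on the index page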
    def get_cover_url(self):
        cover_url = None
        soup = self.index_to_soup(self.index)
        cover_item = soup.find('div', attrs={'class': 'spread-image'})
        if cover_item:
            cover_url = 'http://www.wired.com' + cover_item.a.img['src']
        return cover_url

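    # Rewrite article URLs to their single-page versions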
    def print_version(self, url):
        return url.rstrip('/') + '/all/1'

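    # Remove inline styles, replace links with their text and ensure images have alt text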
    def preprocess_html(self, soup):
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll('a'):
            if item.string is not None:
                item.replaceWith(item.string)
            else:
                # Keep the anchor contents but neutralize the tag itself
                item.name = 'span'
                for atr in ['href', 'target', 'alt', 'title', 'name', 'id']:
                    if item.has_attr(atr):
                        del item[atr]
        for item in soup.findAll('img'):
            if not item.has_attr('alt'):
                item['alt'] = 'image'
        return soup