home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Chip 2011 November
/
CHIP_2011_11.iso
/
Programy
/
Narzedzia
/
Calibre
/
calibre-0.8.18.msi
/
file_280
/
wikinews_en.recipe
< prev
next >
Wrap
Text File
|
2011-09-09
|
3KB
|
75 lines
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Darko Miletic <darko.miletic at gmail.com>'
'''
en.wikinews.org
'''
from calibre.web.feeds.news import BasicNewsRecipe
class WikiNews(BasicNewsRecipe):
    """Fetch the latest Wikinews stories from the FeedBurner feed and
    download each article's printer-friendly page from en.wikinews.org."""
    title                 = 'Wikinews'
    __author__            = 'Darko Miletic'
    description           = 'News from wikipedia'
    category              = 'news, world'
    oldest_article        = 7
    max_articles_per_feed = 100
    publisher             = 'Wiki'
    no_stylesheets        = True
    use_embedded_content  = False
    encoding              = 'utf-8'
    remove_javascript     = True
    language              = 'en'

    # Legacy conversion options consumed by the LRF output plugin.
    html2lrf_options = [
        '--comment', description
        , '--category', category
        , '--publisher', publisher
    ]

    html2epub_options = 'publisher="' + publisher + '"\ncomments="' + description + '"\ntags="' + category + '"'

    # Keep only the headline and the article body container.
    keep_only_tags = [
        dict(name='h1', attrs={'id':'firstHeading'})
        ,dict(name='div', attrs={'id':'bodyContent'})
    ]

    remove_tags = [
        dict(name='link')
        ,dict(name='div',attrs={'id':['printfooter','catlinks','footer']})
        ,dict(name='div',attrs={'class':['thumb left','thumb right']})
    ]
    # Everything after the first h2 (sources/related sections) is dropped.
    remove_tags_after = dict(name='h2')

    feeds = [(u'News', u'http://feeds.feedburner.com/WikinewsLatestNews')]

    def get_article_url(self, article):
        """Map a FeedBurner feed entry to its canonical en.wikinews.org URL.

        Returns None when the entry carries no link so calibre skips the
        article, instead of crashing on None.rpartition.
        """
        artl = article.get('link', None)
        if not artl:
            # Missing or empty link in the feed entry: skip this article.
            return None
        rest, sep, article_id = artl.rpartition('/')
        return 'http://en.wikinews.org/wiki/' + article_id

    def print_version(self, url):
        """Return the printer-friendly version of an article URL."""
        rest, sep, article_id = url.rpartition('/')
        return 'http://en.wikinews.org/w/index.php?title=' + article_id + '&printable=yes'

    def get_cover_url(self):
        """Use the static Wikinews logo as the cover image."""
        return 'http://upload.wikimedia.org/wikipedia/commons/b/bd/Wikinews-logo-en.png'

    def preprocess_html(self, soup):
        """Inject language/charset meta tags, strip nested divs and h2
        headings from the article body, and drop inline style/font
        attributes everywhere."""
        mtag = '<meta http-equiv="Content-Language" content="en"/><meta http-equiv="Content-Type" content="text/html; charset=utf-8">'
        soup.head.insert(0,mtag)
        btag = soup.find('div',attrs={'id':'bodyContent'})
        if btag is not None:
            # Guard: some pages may lack a bodyContent div; without this
            # check btag.findAll would raise AttributeError on None.
            for item in btag.findAll('div'):
                item.extract()
            for item in btag.findAll('h2'):
                item.extract()
        for item in soup.findAll(style=True):
            del item['style']
        for item in soup.findAll(font=True):
            del item['font']
        return soup