"""
Calibre store plugin enabling search and download of e-books from
The Anarchist Library (an amusewiki-based library).
"""
__license__ = 'GPL 3'
__copyright__ = '2012, Ruben Pollan <>; 2020, ibu radempa <>'
__docformat__ = 'restructuredtext en'
# Standard library.
import json
from contextlib import closing
from urllib.parse import quote_plus as quote  # Py3; the old urllib2 import was a Py2 remnant

# Qt (calibre >= 4 ships PyQt5; the duplicate PyQt4 import could never coexist).
from PyQt5.Qt import QUrl

# calibre API. url_slash_cleaner is required by open() below.
# NOTE(review): the original module paths were lost in extraction; these are
# the standard calibre store-plugin paths -- confirm against the calibre
# version this plugin targets.
from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog

# Plugin wrapper (StoreBase subclass carrying `version`) from the package
# __init__; the class defined later in this file shadows this name.
from . import TheAnarchistLibraryStore
# Primary JSON search endpoint, a %-format string taking (page, quoted query).
# NOTE(review): empty here -- looks like the real URL was lost when this file
# was extracted; restore before use.
url1 = ''
# Fallback search endpoint (same format); also an empty placeholder here.
url2 = ''
"""Search URLs. If the library has no fallback url, set url2 = None."""
# Hard cap on pagination so search() always terminates.
max_pages = 10
"""Page limit. (amusewiki gives us 10 results per page.)"""
# User-Agent sent with every request. NOTE(review): `TheAnarchistLibraryStore`
# at this point is the name imported from the package __init__ (which carries
# `version`), not the class defined below -- the later class definition
# shadows it; confirm the intended source of `version`.
user_agent = 'Calibre plugin calibre-tal v' + '{}.{}.{}'.format(*TheAnarchistLibraryStore.version)
class TheAnarchistLibraryStore(BasicStoreConfig, StorePlugin):
    """Calibre store plugin: search and download from an amusewiki library.

    NOTE(review): this definition shadows the ``TheAnarchistLibraryStore``
    imported at module top -- presumably the package ``__init__`` exports the
    StoreBase wrapper under the same name; confirm against the plugin layout.
    """

    def open(self, parent=None, detail_item=None, external=False):
        """Open the store, either in the system browser or in calibre's
        embedded web-store dialog.

        :param parent: parent widget for the embedded dialog
        :param detail_item: URL of a specific book page; falls back to the
            store root URL when None
        :param external: force opening in the system browser
        """
        url = ''  # store root URL; empty placeholder in this copy of the file
        if external or self.config.get('open_external', False):
            open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))
        else:
            # Restored branch: the flattened source dropped the else/show
            # lines, so the dialog was built but never configured or shown.
            # Reconstructed per the standard calibre StorePlugin.open pattern.
            d = WebStoreDialog(self.gui, url, parent, detail_item)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=10):
        """Yield SearchResult objects for *query*, at most *max_results*.

        Pages through the JSON endpoint until the server returns an empty
        page (the ``False`` sentinel from ``_iter_search_results``), the
        page cap is reached, or enough results were yielded.

        Bug fixed vs. the flattened source: ``if result is False: yield
        result`` yielded the sentinel itself and dropped every real result;
        ``max_results`` was ignored.
        """
        br = browser(user_agent=user_agent)
        yielded = 0
        for page in range(1, max_pages + 1):
            for result in self._results_for_page(br, page, query, timeout):
                if result is False:
                    # Empty page: later pages cannot have results either.
                    return
                yield result
                yielded += 1
                if yielded >= max_results:
                    return

    def _results_for_page(self, br, page, query, timeout):
        """Fetch one result page from url1, falling back to url2 on error.

        Materializes the generator into a list so a network failure surfaces
        here (where the fallback can catch it) instead of lazily in search().
        """
        try:
            return list(self._iter_search_results(br, url1, page, query, timeout))
        except Exception:
            # Fallback URL per the module docstring; re-raise when none is set.
            if not url2:
                raise
            return list(self._iter_search_results(br, url2, page, query, timeout))

    def _iter_search_results(self, br, url, page, query, timeout):
        """Yield SearchResult objects parsed from one JSON search page.

        Yields the sentinel ``False`` (and stops) when the page is empty,
        which tells search() to end pagination.

        :param br: mechanize-style browser from calibre's browser()
        :param url: %-format string taking (page number, URL-quoted query)
        """
        # Restored vs. the flattened source, which had lost the `br.open(`
        # call and fused the title/author assignments onto one line.
        with closing(br.open(url % (page, quote(query)), timeout=timeout)) as f:
            doc = json.load(f)
        if not doc:
            yield False
            return
        for data in doc:
            s = SearchResult()
            s.title = data['title'].strip()
            s.author = data['author'].strip()
            s.price = '$0.00'  # everything in the library is free of charge
            s.detail_item = data['url'].strip()
            s.drm = SearchResult.DRM_UNLOCKED
            # amusewiki serves each text in several formats at url + suffix.
            base = data['url'].strip()
            s.downloads['EPUB'] = base + '.epub'
            s.downloads['PDF'] = base + '.pdf'
            s.downloads['A4.PDF'] = base + '.a4.pdf'
            s.downloads['LT.PDF'] = base + '.lt.pdf'
            s.formats = 'EPUB, PDF, A4.PDF, LT.PDF'
            yield s