[hentaibox] remove extractor
This commit is contained in:
@@ -26,7 +26,6 @@ modules = [
|
|||||||
"gomanga",
|
"gomanga",
|
||||||
"hbrowse",
|
"hbrowse",
|
||||||
"hentai2read",
|
"hentai2read",
|
||||||
"hentaibox",
|
|
||||||
"hentaifoundry",
|
"hentaifoundry",
|
||||||
"hentaihere",
|
"hentaihere",
|
||||||
"hitomi",
|
"hitomi",
|
||||||
|
|||||||
@@ -1,59 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright 2016 Mike Fährmann
|
|
||||||
#
|
|
||||||
# This program is free software; you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License version 2 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
|
|
||||||
"""Extract images from http://www.hentaibox.net/"""
|
|
||||||
|
|
||||||
from .common import Extractor, Message
|
|
||||||
from .. import text, iso639_1
|
|
||||||
|
|
||||||
|
|
||||||
class HentaiboxChapterExtractor(Extractor):
    """Extractor for a single manga chapter from hentaibox.net"""
    category = "hentaibox"
    subcategory = "chapter"
    directory_fmt = ["{category}", "{series}", "{title}"]
    filename_fmt = "{num:>03}.{extension}"
    pattern = [r"(?:https?://)?(?:www\.)?hentaibox\.net/"
               r"[^/]+/(\d+)_\d+_([^/&]+)"]
    test = [(("http://www.hentaibox.net/hentai-manga/"
              "16_18_Original_Amazon-No-Hiyaku-Amazon-Elixir-Decensored"), {
        "url": "d1a50a9b289d284f178971e01cf312791888e057",
        "keyword": "b4b100f800b716e573e072f01b5d604d9b436b70",
    })]

    def __init__(self, match):
        """Remember the matched chapter URL and its page count.

        `match` groups: (1) page count, (2) chapter slug.
        """
        Extractor.__init__(self)
        # group(0) is the full chapter URL; group(1) is the page count
        # embedded in the path ("<count>_<id>_<slug>").
        self.count = match.group(1)
        self.url = match.group(0)

    def items(self):
        """Yield the message stream for this chapter (version,
        directory metadata, then one Url message per image)."""
        # "&slideshow=play" switches the page into slideshow mode,
        # which embeds every image path in the markup.
        page = self.request(self.url + "&slideshow=play").text
        metadata = self.get_job_metadata(page)
        yield Message.Version, 1
        yield Message.Directory, metadata
        for index, image_url in enumerate(self.get_image_urls(page), 1):
            metadata["num"] = index
            # Text after the last "." is the file extension; if there is
            # no dot, this falls back to the whole URL (same as the old
            # rfind()-based slicing).
            metadata["extension"] = image_url.rpartition(".")[2]
            yield Message.Url, image_url, metadata

    def get_job_metadata(self, page):
        """Collect metadata for extractor-job"""
        # Pull title/series/language out of the page's description
        # markup; "count" is seeded from the URL match.
        extracted, _ = text.extract_all(page, (
            ("title"   , 'content="Read or Download ', ' hentai manga from'),
            ("series"  , ' the series ', ' with ' + self.count),
            ("language", ' translated pages to ', '.'),
        ), values={"count": self.count})
        extracted["lang"] = iso639_1.language_to_code(extracted["language"])
        return extracted

    @staticmethod
    def get_image_urls(page):
        """Extract and return a list of all image-urls"""
        # Each image path sits in its own slideshow_path span.
        marker_open = '<span class="slideshow_path">'
        marker_close = '</span>'
        yield from text.extract_iter(page, marker_open, marker_close)
|
|
||||||
@@ -18,7 +18,6 @@ Supported Sites
|
|||||||
- gomanga.co
|
- gomanga.co
|
||||||
- hbrowse.com
|
- hbrowse.com
|
||||||
- hentai2read.com
|
- hentai2read.com
|
||||||
- hentaibox.net
|
|
||||||
- hentaifoundry.com
|
- hentaifoundry.com
|
||||||
- hentaihere.com
|
- hentaihere.com
|
||||||
- hitomi.la
|
- hitomi.la
|
||||||
|
|||||||
@@ -48,7 +48,7 @@ def _generate_test(extr, tcase):
|
|||||||
|
|
||||||
|
|
||||||
# enable selective testing for direct calls
|
# enable selective testing for direct calls
|
||||||
skip = ["exhentai", "kissmanga", "mangafox", "yomanga"]
|
skip = ["exhentai", "kissmanga", "mangafox"]
|
||||||
if __name__ == '__main__' and len(sys.argv) > 1:
|
if __name__ == '__main__' and len(sys.argv) > 1:
|
||||||
extractors = [
|
extractors = [
|
||||||
extr for extr in extractor.extractors()
|
extr for extr in extractor.extractors()
|
||||||
|
|||||||
Reference in New Issue
Block a user