code cleanup to use nameext_from_url
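The repeated filename_from_url/os.path.splitext boilerplate in the extractors below is collapsed into a single helper call. A minimal sketch of what text.nameext_from_url presumably does, inferred from the call sites in this commit (filename_from_url and the use of unquote are assumptions about gallery_dl/text.py, not the verbatim implementation):

    import os.path
    from urllib.parse import unquote

    def filename_from_url(url):
        """Return the last path component of 'url' (assumed helper)."""
        return url.partition("?")[0].rpartition("/")[2]

    def nameext_from_url(url, data=None):
        """Fill 'name' and 'extension' in 'data' from the filename in 'url'."""
        if data is None:
            data = {}
        name, ext = os.path.splitext(unquote(filename_from_url(url)))
        data["name"] = name
        data["extension"] = ext[1:]  # extension without the leading dot
        return data  # returning 'data' allows inline use in yield/return

Returning the dictionary is what enables the inline patterns in the hunks below, e.g. yield Message.Url, url, text.nameext_from_url(url, data).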
@@ -44,14 +44,11 @@ class BatotoExtractor(AsynchronousExtractor):
         page = self.request(self.url, params=params).text
         data = self.get_job_metadata(page)
         yield Message.Version, 1
-        yield Message.Directory, data
+        yield Message.Directory, data.copy()
         for i in range(int(data["count"])):
             next_url, image_url = self.get_page_urls(page)
-            filename = text.unquote(text.filename_from_url(image_url))
-            name, ext = os.path.splitext(filename)
+            text.nameext_from_url(image_url, data)
             data["page"] = i+1
-            data["name"] = name
-            data["extension"] = ext[1:]
             yield Message.Url, image_url, data.copy()
             if next_url:
                 params["p"] += 1

@@ -59,13 +59,7 @@ class BooruExtractor(Extractor):
     def get_file_metadata(self, data):
         """Collect metadata for a downloadable file"""
         data["category"] = self.info["category"]
-        data["filename"] = text.unquote(
-            text.filename_from_url(self.get_file_url(data))
-        )
-        name, ext = os.path.splitext(data["filename"])
-        data["name"] = name
-        data["extension"] = ext[1:]
-        return data
+        return text.nameext_from_url(self.get_file_url(data), data)

     def get_file_url(self, data):
         """Extract download-url from 'data'"""

@@ -85,17 +85,15 @@ class DeviantArtExtractor(AsynchronousExtractor):
         url   , pos = text.extract(image, ' data-super-img="', '"', pos)
         width , pos = text.extract(image, ' data-super-width="', '"', pos)
         height, pos = text.extract(image, ' data-super-height="', '"', pos)
-        name, ext = os.path.splitext(text.filename_from_url(url))
-        return url, {
+        data = {
             "index": index,
             "title": match.group(1),
             "artist": match.group(2),
             "date": match.group(3),
             "width": width,
             "height": height,
-            "name": name,
-            "extension": ext[1:],
         }
+        return url, text.nameext_from_url(url, data)

     @staticmethod
     def extract_data(txt, attr, pattern):

@@ -58,10 +58,8 @@ class ExhentaiExtractor(Extractor):
             urlkey = "origurl"
         for num, image in enumerate(self.get_images(url), 1):
             image.update(data)
-            name, ext = os.path.splitext(text.filename_from_url(image["url"]))
             image["num"] = num
-            image["name"] = name
-            image["extension"] = ext[1:]
+            text.nameext_from_url(image["url"], image)
             if "/fullimg.php" in image[urlkey]:
                 time.sleep(random.uniform(1, 2))
             yield Message.Url, image[urlkey], image

@@ -36,10 +36,8 @@ class HbrowseExtractor(Extractor):
         yield Message.Version, 1
         yield Message.Directory, data
         for num, url in enumerate(self.get_image_urls(page), 1):
-            name, ext = os.path.splitext(text.filename_from_url(url))
-            data["name"] = name
-            data["extension"] = ext[1:]
             data["num"] = num
+            text.nameext_from_url(url, data)
             yield Message.Url, url, data

     def get_job_metadata(self, page):

@@ -70,14 +70,12 @@ class HentaiFoundryExtractor(Extractor):
         page = self.request(url).text
         index = text.extract(url, '/', '/', len(self.url_base) + len(self.artist))[0]
         title, pos = text.extract(page, 'Pictures</a> » <span>', '<')
-        url  , pos = text.extract(page, '//pictures.hentai-foundry.com', '"', pos)#
-        name, ext = os.path.splitext(text.filename_from_url(url))
-        return "http://pictures.hentai-foundry.com" + url, {
+        url  , pos = text.extract(page, '//pictures.hentai-foundry.com', '"', pos)
+        data = {
             "index": index,
             "title": text.unescape(title),
-            "name": name,
-            "extension": ext[1:],
         }
+        return "http://pictures.hentai-foundry.com" + url, text.nameext_from_url(url, data)

     def set_filters(self, token):
         """Set site-internal filters to show all images"""

@@ -37,11 +37,8 @@ class HitomiExtractor(Extractor):
         yield Message.Version, 1
         yield Message.Directory, data
         for num, url in enumerate(images, 1):
-            name, ext = os.path.splitext(text.filename_from_url(url))
             data["num"] = num
-            data["name"] = name
-            data["extension"] = ext[1:]
-            yield Message.Url, url, data
+            yield Message.Url, url, text.nameext_from_url(url, data)

     def get_job_metadata(self, page):
         """Collect metadata for extractor-job"""

@@ -37,11 +37,8 @@ class ImagebamExtractor(AsynchronousExtractor):
         yield Message.Directory, data
         for image_url, image_id in self.get_images(data["first-url"]):
             data["id"] = image_id
-            data["filename"] = text.unquote(text.filename_from_url(image_url))
-            name, ext = os.path.splitext(data["filename"])
             data["num"] += 1
-            data["name"] = name
-            data["extension"] = ext[1:]
+            text.nameext_from_url(image_url, data)
             yield Message.Url, image_url, data.copy()

     def get_job_metadata(self):

@@ -35,12 +35,8 @@ class ImgthExtractor(Extractor):
         yield Message.Version, 1
         yield Message.Directory, data
         for num, url in enumerate(self.get_images(page), 1):
-            name, ext = os.path.splitext(text.filename_from_url(url))
             data["num"] = num
-            data["name"] = name
-            data["extension"] = ext[1:]
-            yield Message.Url, url, data
+            yield Message.Url, url, text.nameext_from_url(url, data)

     def get_images(self, page):
         pnum = 0
         while True:

@@ -38,12 +38,8 @@ class KissmangaExtractor(Extractor):
         yield Message.Version, 1
         yield Message.Directory, data
         for num, url in enumerate(imgs, 1):
-            filename = text.unquote(text.filename_from_url(url))
-            name, ext = os.path.splitext(filename)
             data["page"] = num
-            data["name"] = name
-            data["extension"] = ext[1:]
-            yield Message.Url, url, data
+            yield Message.Url, url, text.nameext_from_url(url, data)

     @staticmethod
     def get_job_metadata(page):

@@ -91,12 +91,7 @@ class MangaReaderExtractor(AsynchronousExtractor):
         width , pos = extr(page, '<img id="img" width="', '"', pos)
         height, pos = extr(page, ' height="', '"', pos)
         image, pos = extr(page, ' src="', '"', pos)
-        filename = text.unquote(text.filename_from_url(image))
-        name, ext = os.path.splitext(filename)
-
-        return self.url_base + url, image, {
+        return self.url_base + url, image, text.nameext_from_url(image, {
             "width": width,
             "height": height,
-            "name": name,
-            "extension": ext[1:],
-        }
+        })

@@ -34,12 +34,10 @@ class MangaShareExtractor(AsynchronousExtractor):
         page = self.request(self.url_fmt.format(self.part, 1)).text
         data = self.get_job_metadata(page)
         yield Message.Version, 1
-        yield Message.Directory, data
+        yield Message.Directory, data.copy()
         for i, url in zip(range(int(data["count"])), (self.get_image_urls(page))):
-            name, ext = os.path.splitext(text.filename_from_url(url))
-            data["name"] = name
-            data["extension"] = ext[1:]
             data["page"] = i+1
+            text.nameext_from_url(url, data)
             yield Message.Url, url, data.copy()

     @staticmethod

@@ -40,11 +40,8 @@ class MangaStreamExtractor(AsynchronousExtractor):
             if next_url:
                 page = self.request(next_url).text
             next_url, image_url = self.get_page_metadata(page)
-            filename = text.unquote(text.filename_from_url(image_url))
-            name, ext = os.path.splitext(filename)
+            text.nameext_from_url(image_url, data)
             data["page"] = i+1
-            data["name"] = name
-            data["extension"] = ext[1:]
             yield Message.Url, image_url, data.copy()

     def get_job_metadata(self, page):

@@ -67,10 +67,8 @@ class NijieExtractor(AsynchronousExtractor):
         page = self.request(self.popup_url + image_id).text
         matches = re.findall('<img src="([^"]+)"', page)
         for index, url in enumerate(matches):
-            yield "https:" + url, {
+            yield "https:" + url, text.nameext_from_url(url, {
                 "count": len(matches),
                 "index": index,
                 "image-id": image_id,
-                "name" : text.filename_from_url(url),
-                "extension": url[url.rfind(".")+1:],
-            }
+            })

@@ -76,14 +76,11 @@ class SankakuExtractor(AsynchronousExtractor):
         image_url, pos = text.extract(page, '<li>Original: <a href="', '"')
         width    , pos = text.extract(page, '>', 'x', pos)
         height   , pos = text.extract(page, '', ' ', pos)
-        filename = text.filename_from_url(image_url)
-        name, ext = os.path.splitext(filename)
-        return {
+        data = text.nameext_from_url(image_url, {
             "id": image_id,
             "file-url": "https:" + image_url,
             "width": width,
             "height": height,
-            "md5": name,
-            "name": name,
-            "extension": ext[1:],
-        }
+        })
+        data["md5"] = data["name"]
+        return data

@@ -41,10 +41,8 @@ class SpectrumNexusExtractor(AsynchronousExtractor):
         count = int(data["count"])
         for i in range(1, count+1):
             url = self.get_image_url(page)
-            name, ext = os.path.splitext(text.filename_from_url(url))
+            text.nameext_from_url(url, data)
             data["page"] = i
-            data["name"] = name
-            data["extension"] = ext[1:]
             yield Message.Url, url, data.copy()
             if i < count:
                 params["page"] += 1