remove @staticmethod decorators

There might have been a time when calling a static method was faster
than calling a regular method, but that is no longer the case. According
to micro-benchmarks, calling a static method is 70% slower in CPython
3.13, and the @staticmethod decorator also slows down executing the
code of a class definition.
This commit is contained in:
Mike Fährmann
2025-06-12 22:13:46 +02:00
parent 8b6bc54e95
commit 811b665e33
68 changed files with 139 additions and 252 deletions

View File

@@ -273,8 +273,7 @@ def main():
jobtype = job.UrlJob
jobtype.maxdepth = args.list_urls
if config.get(("output",), "fallback", True):
jobtype.handle_url = \
staticmethod(jobtype.handle_url_fallback)
jobtype.handle_url = jobtype.handle_url_fallback
elif args.dump_json:
jobtype = job.DataJob
jobtype.resolve = args.dump_json - 1
@@ -549,13 +548,11 @@ class InputManager():
"Unable to update '%s' (%s: %s)",
path, exc.__class__.__name__, exc)
@staticmethod
def _action_comment(lines, indicies):
def _action_comment(self, lines, indicies):
for i in indicies:
lines[i] = "# " + lines[i]
@staticmethod
def _action_delete(lines, indicies):
def _action_delete(self, lines, indicies):
for i in indicies:
lines[i] = ""

View File

@@ -524,8 +524,7 @@ class LinuxChromiumCookieDecryptor(ChromiumCookieDecryptor):
self._cookie_counts = {"v10": 0, "v11": 0, "other": 0}
self._offset = (32 if meta_version >= 24 else 0)
@staticmethod
def derive_key(password):
def derive_key(self, password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads
# /main/components/os_crypt/os_crypt_linux.cc
@@ -569,8 +568,7 @@ class MacChromiumCookieDecryptor(ChromiumCookieDecryptor):
self._cookie_counts = {"v10": 0, "other": 0}
self._offset = (32 if meta_version >= 24 else 0)
@staticmethod
def derive_key(password):
def derive_key(self, password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads
# /main/components/os_crypt/os_crypt_mac.mm

View File

@@ -360,8 +360,7 @@ class HttpDownloader(DownloaderBase):
"closing the connection anyway", exc.__class__.__name__, exc)
response.close()
@staticmethod
def receive(fp, content, bytes_total, bytes_start):
def receive(self, fp, content, bytes_total, bytes_start):
write = fp.write
for data in content:
write(data)
@@ -411,8 +410,7 @@ class HttpDownloader(DownloaderBase):
self.log.warning("Unknown MIME type '%s'", mtype)
return "bin"
@staticmethod
def _adjust_extension(pathfmt, file_header):
def _adjust_extension(self, pathfmt, file_header):
"""Check filename extension against file header"""
if not SIGNATURE_CHECKS[pathfmt.extension](file_header):
for ext, check in SIGNATURE_CHECKS.items():

View File

@@ -233,8 +233,7 @@ class YoutubeDLDownloader(DownloaderBase):
int(speed) if speed else 0,
)
@staticmethod
def _set_outtmpl(ytdl_instance, outtmpl):
def _set_outtmpl(self, ytdl_instance, outtmpl):
try:
ytdl_instance._parse_outtmpl
except AttributeError:

View File

@@ -74,8 +74,7 @@ class _2chanThreadExtractor(Extractor):
data["ext"] = "." + data["extension"]
return data
@staticmethod
def _extract_post(post):
def _extract_post(self, post):
return text.extract_all(post, (
("post", 'class="csb">' , '<'),
("name", 'class="cnm">' , '<'),
@@ -85,8 +84,7 @@ class _2chanThreadExtractor(Extractor):
("com" , '>', '</blockquote>'),
))[0]
@staticmethod
def _extract_image(post, data):
def _extract_image(self, post, data):
text.extract_all(post, (
(None , '_blank', ''),
("filename", '>', '<'),

View File

@@ -83,8 +83,7 @@ class _35photoExtractor(Extractor):
info["num"] = 1
yield info
@staticmethod
def _photo_ids(page):
def _photo_ids(self, page):
"""Extract unique photo IDs and return them as sorted list"""
# searching for photo-id="..." doesn't always work (see unit tests)
if not page:

View File

@@ -58,8 +58,7 @@ class _4archiveThreadExtractor(Extractor):
for post in page.split('class="postContainer')[1:]
]
@staticmethod
def parse(post):
def parse(self, post):
extr = text.extract_from(post)
data = {
"name": extr('class="name">', "</span>"),

View File

@@ -66,8 +66,7 @@ class _4chanarchivesThreadExtractor(Extractor):
post["extension"] = post["url"].rpartition(".")[2]
return post
@staticmethod
def _extract_post(html):
def _extract_post(self, html):
extr = text.extract_from(html)
return {
"no" : text.parse_int(extr('', '"')),
@@ -77,8 +76,7 @@ class _4chanarchivesThreadExtractor(Extractor):
html[html.find('<blockquote'):].partition(">")[2]),
}
@staticmethod
def _extract_file(html, post):
def _extract_file(self, html, post):
extr = text.extract_from(html, html.index(">File: <"))
post["url"] = extr('href="', '"')
post["filename"] = text.unquote(extr(">", "<").rpartition(".")[0])

View File

@@ -92,8 +92,7 @@ class _8musesAlbumExtractor(Extractor):
album["updatedAt"], "%Y-%m-%dT%H:%M:%S.%fZ"),
}
@staticmethod
def _unobfuscate(data):
def _unobfuscate(self, data):
return util.json_loads("".join([
chr(33 + (ord(c) + 14) % 94) if "!" <= c <= "~" else c
for c in text.unescape(data.strip("\t\n\r !"))

View File

@@ -87,8 +87,7 @@ class ArtstationExtractor(Extractor):
yield Message.Url, url, asset
@staticmethod
def _image_fallback(lhs, rhs):
def _image_fallback(self, lhs, rhs):
yield lhs + "/large/" + rhs
yield lhs + "/medium/" + rhs
yield lhs + "/small/" + rhs
@@ -172,8 +171,7 @@ class ArtstationExtractor(Extractor):
url, method="POST", headers=headers, json={},
).json()["public_csrf_token"]
@staticmethod
def _no_cache(url):
def _no_cache(self, url):
"""Cause a cache miss to prevent Cloudflare 'optimizations'
Cloudflare's 'Polish' optimization strips image metadata and may even
@@ -344,8 +342,7 @@ class ArtstationChallengeExtractor(ArtstationExtractor):
text.nameext_from_url(url, update)
yield Message.Url, self._no_cache(url), update
@staticmethod
def _id_from_url(url):
def _id_from_url(self, url):
"""Get an image's submission ID from its URL"""
parts = url.split("/")
return text.parse_int("".join(parts[7:10]))

View File

@@ -60,8 +60,7 @@ class BbcGalleryExtractor(GalleryExtractor):
))
return results
@staticmethod
def _fallback_urls(src, max_width):
def _fallback_urls(self, src, max_width):
front, _, back = src.partition("/320x180_b/")
for width in (1920, 1600, 1280, 976):
if width < max_width:

View File

@@ -59,8 +59,7 @@ class ComicvineTagExtractor(BooruExtractor):
_file_url = operator.itemgetter("original")
@staticmethod
def _prepare(post):
def _prepare(self, post):
post["date"] = text.parse_datetime(
post["dateCreated"], "%a, %b %d %Y")
post["tags"] = [tag["name"] for tag in post["tags"] if tag["name"]]

View File

@@ -280,8 +280,7 @@ class DeviantartExtractor(Extractor):
deviation["index_base36"],
))
@staticmethod
def commit(deviation, target):
def commit(self, deviation, target):
url = target["src"]
name = target.get("filename") or url
target = target.copy()
@@ -680,8 +679,7 @@ x2="45.4107524%" y2="71.4898596%" id="app-root-3">\
return content
@staticmethod
def _find_folder(folders, name, uuid):
def _find_folder(self, folders, name, uuid):
if uuid.isdecimal():
match = util.re(
"(?i)" + name.replace("-", "[^a-z0-9]+") + "$").match
@@ -1889,8 +1887,7 @@ class DeviantartOAuthAPI():
result.extend(self._pagination(endpoint, params, False, key=key))
return result
@staticmethod
def _shared_content(results):
def _shared_content(self, results):
"""Return an iterable of shared deviations in 'results'"""
for result in results:
for item in result.get("items") or ():

View File

@@ -391,8 +391,7 @@ class DiscordAPI():
return
offset += len(data)
@staticmethod
def _raise_invalid_token():
def _raise_invalid_token(self):
raise exception.AuthenticationError("""Invalid or missing token.
Please provide a valid token following these instructions:

View File

@@ -534,8 +534,7 @@ class ExhentaiGalleryExtractor(ExhentaiExtractor):
nl = data["_nl"]
@staticmethod
def _parse_image_info(url):
def _parse_image_info(self, url):
for part in url.split("/")[4:]:
try:
_, size, width, height, _ = part.split("-")
@@ -552,8 +551,7 @@ class ExhentaiGalleryExtractor(ExhentaiExtractor):
"height": text.parse_int(height),
}
@staticmethod
def _parse_original_info(info):
def _parse_original_info(self, info):
parts = info.lstrip().split(" ")
size = text.parse_bytes(parts[3] + parts[4][0])

View File

@@ -37,15 +37,13 @@ class FacebookExtractor(Extractor):
self.videos = self.config("videos", True)
self.author_followups = self.config("author-followups", False)
@staticmethod
def decode_all(txt):
def decode_all(self, txt):
return text.unescape(
txt.encode().decode("unicode_escape")
.encode("utf_16", "surrogatepass").decode("utf_16")
).replace("\\/", "/")
@staticmethod
def parse_set_page(set_page):
def parse_set_page(self, set_page):
directory = {
"set_id": text.extr(
set_page, '"mediaSetToken":"', '"'
@@ -77,8 +75,7 @@ class FacebookExtractor(Extractor):
return directory
@staticmethod
def parse_photo_page(photo_page):
def parse_photo_page(self, photo_page):
photo = {
"id": text.extr(
photo_page, '"__isNode":"Photo","id":"', '"'
@@ -133,8 +130,7 @@ class FacebookExtractor(Extractor):
return photo
@staticmethod
def parse_post_page(post_page):
def parse_post_page(self, post_page):
first_photo_url = text.extr(
text.extr(
post_page, '"__isMedia":"Photo"', '"target_group"'
@@ -148,8 +144,7 @@ class FacebookExtractor(Extractor):
return post
@staticmethod
def parse_video_page(video_page):
def parse_video_page(self, video_page):
video = {
"id": text.extr(
video_page, '\\"video_id\\":\\"', '\\"'
@@ -410,8 +405,7 @@ class FacebookProfileExtractor(FacebookExtractor):
)
example = "https://www.facebook.com/USERNAME"
@staticmethod
def get_profile_photos_set_id(profile_photos_page):
def get_profile_photos_set_id(self, profile_photos_page):
set_ids_raw = text.extr(
profile_photos_page, '"pageItems"', '"page_info"'
)

View File

@@ -585,8 +585,7 @@ class FlickrAPI(oauth.OAuth1API):
if "license" in photo:
photo["license_name"] = self.LICENSES.get(photo["license"])
@staticmethod
def _clean_info(info):
def _clean_info(self, info):
info["title"] = info["title"]["_content"]
info["description"] = info["description"]["_content"]
return info

View File

@@ -74,8 +74,7 @@ class FoolfuukaExtractor(BaseExtractor):
return url
@staticmethod
def _remote_direct(media):
def _remote_direct(self, media):
return media["remote_media_link"]

View File

@@ -24,8 +24,7 @@ class FoolslideExtractor(BaseExtractor):
return BaseExtractor.request(
self, url, encoding="utf-8", method="POST", data={"adult": "true"})
@staticmethod
def parse_chapter_url(url, data):
def parse_chapter_url(self, url, data):
info = url.partition("/read/")[2].rstrip("/").split("/")
lang = info[1].partition("-")[0]
data["lang"] = lang

View File

@@ -152,8 +152,7 @@ class FuraffinityExtractor(Extractor):
return data
@staticmethod
def _process_description(description):
def _process_description(self, description):
return text.unescape(text.remove_html(description, "", ""))
def _pagination(self, path, folder=None):

View File

@@ -89,8 +89,7 @@ class GelbooruV02Extractor(booru.BooruExtractor):
return
params["pid"] += self.per_page
@staticmethod
def _prepare(post):
def _prepare(self, post):
post["tags"] = post["tags"].strip()
post["date"] = text.parse_datetime(
post["created_at"], "%a %b %d %H:%M:%S %z %Y")

View File

@@ -49,8 +49,7 @@ class HentaihereChapterExtractor(HentaihereBase, ChapterExtractor):
"language": "English",
}
@staticmethod
def images(page):
def images(self, page):
images = text.extr(page, "var rff_imageList = ", ";")
return [
("https://hentaicdn.com/hentai" + part, None)

View File

@@ -78,8 +78,7 @@ class HentainexusGalleryExtractor(GalleryExtractor):
pass
return results
@staticmethod
def _decode(data):
def _decode(self, data):
# https://hentainexus.com/static/js/reader.min.js?r=22
hostname = "hentainexus.com"
primes = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53)
@@ -118,8 +117,7 @@ class HentainexusGalleryExtractor(GalleryExtractor):
return result
@staticmethod
def _join_title(data):
def _join_title(self, data):
event = data['event']
artist = data['artist']
circle = data['circle']

View File

@@ -63,8 +63,7 @@ class ImagebamGalleryExtractor(ImagebamExtractor):
image.update(data)
yield Message.Url, image["url"], image
@staticmethod
def metadata(page):
def metadata(self, page):
return {"title": text.unescape(text.extr(
page, 'id="gallery-name">', '<').strip())}

View File

@@ -30,18 +30,15 @@ class ImgboxExtractor(Extractor):
text.nameext_from_url(imgdata["filename"], imgdata)
yield Message.Url, self.get_image_url(imgpage), imgdata
@staticmethod
def get_job_metadata():
def get_job_metadata(self):
"""Collect metadata for extractor-job"""
return {}
@staticmethod
def get_image_keys():
def get_image_keys(self):
"""Return an iterable containing all image-keys"""
return []
@staticmethod
def get_image_metadata(page):
def get_image_metadata(self, page):
"""Collect metadata for a downloadable file"""
return text.extract_all(page, (
("num" , '</a> &nbsp; ', ' of '),
@@ -49,8 +46,7 @@ class ImgboxExtractor(Extractor):
("filename" , ' title="', '"'),
))[0]
@staticmethod
def get_image_url(page):
def get_image_url(self, page):
"""Extract download-url"""
return text.extr(page, 'property="og:image" content="', '"')
@@ -102,8 +98,7 @@ class ImgboxImageExtractor(ImgboxExtractor):
def get_image_keys(self):
return (self.image_key,)
@staticmethod
def get_image_metadata(page):
def get_image_metadata(self, page):
data = ImgboxExtractor.get_image_metadata(page)
if not data["filename"]:
raise exception.NotFoundError("image")

View File

@@ -357,8 +357,7 @@ class InstagramExtractor(Extractor):
return data
@staticmethod
def _extract_tagged_users(src, dest):
def _extract_tagged_users(self, src, dest):
dest["tagged_users"] = tagged_users = []
edges = src.get("edge_media_to_tagged_user")
@@ -979,8 +978,7 @@ class InstagramGraphqlAPI():
self.user_by_id = api.user_by_id
self.user_id = api.user_id
@staticmethod
def _unsupported(_=None):
def _unsupported(self, _=None):
raise exception.StopExtraction("Unsupported with GraphQL API")
def highlights_tray(self, user_id):

View File

@@ -121,22 +121,18 @@ class KeenspotComicExtractor(Extractor):
pos = page.index(self._needle) + len(self._needle)
return text.extract(page, 'href="', '"', pos)[0]
@staticmethod
def _next_link(page):
def _next_link(self, page):
return text.extr(page, '<link rel="next" href="', '"')
@staticmethod
def _next_id(page):
def _next_id(self, page):
pos = page.find('id="next_')
return text.rextr(page, 'href="', '"', pos) if pos >= 0 else None
@staticmethod
def _next_lastblood(page):
def _next_lastblood(self, page):
pos = page.index("link rel='next'")
return text.extract(page, "href='", "'", pos)[0]
@staticmethod
def _next_brawl(page):
def _next_brawl(self, page):
pos = page.index("comic-nav-next")
url = text.rextr(page, 'href="', '"', pos)
return None if "?random" in url else url

View File

@@ -20,8 +20,7 @@ class KomikcastBase():
category = "komikcast"
root = "https://komikcast02.com"
@staticmethod
def parse_chapter_string(chapter_string, data=None):
def parse_chapter_string(self, chapter_string, data=None):
"""Parse 'chapter_string' value and add its info to 'data'"""
if data is None:
data = {}
@@ -52,8 +51,7 @@ class KomikcastChapterExtractor(KomikcastBase, ChapterExtractor):
info = text.extr(page, "<title>", " - Komikcast<")
return self.parse_chapter_string(info)
@staticmethod
def images(page):
def images(self, page):
readerarea = text.extr(
page, '<div class="main-reading-area', '</div')
pattern = util.re(r"<img[^>]* src=[\"']([^\"']+)")
@@ -82,8 +80,7 @@ class KomikcastMangaExtractor(KomikcastBase, MangaExtractor):
results.append((url, data.copy()))
return results
@staticmethod
def metadata(page):
def metadata(self, page):
"""Return a dict with general metadata"""
manga , pos = text.extract(page, "<title>" , " - Komikcast<")
genres, pos = text.extract(

View File

@@ -15,8 +15,7 @@ class MangareadBase():
category = "mangaread"
root = "https://www.mangaread.org"
@staticmethod
def parse_chapter_string(chapter_string, data):
def parse_chapter_string(self, chapter_string, data):
match = util.re(
r"(?:(.+)\s*-\s*)?[Cc]hapter\s*(\d+)(\.\d+)?(?:\s*-\s*(.+))?"
).match(text.unescape(chapter_string).strip())

View File

@@ -53,8 +53,7 @@ class MangoxoExtractor(Extractor):
raise exception.AuthenticationError(data.get("msg"))
return {"SESSION": self.cookies.get("SESSION")}
@staticmethod
def _sign_by_md5(username, password, token):
def _sign_by_md5(self, username, password, token):
# https://dns.mangoxo.com/libs/plugins/phoenix-ui/js/phoenix-ui.js
params = [
("username" , username),
@@ -68,8 +67,7 @@ class MangoxoExtractor(Extractor):
params.append(("sign", sign.upper()))
return params
@staticmethod
def _total_pages(page):
def _total_pages(self, page):
return text.parse_int(text.extract(page, "total :", ",")[0])

View File

@@ -20,8 +20,7 @@ class MoebooruExtractor(BooruExtractor):
filename_fmt = "{category}_{id}_{md5}.{extension}"
page_start = 1
@staticmethod
def _prepare(post):
def _prepare(self, post):
post["date"] = text.parse_timestamp(post["created_at"])
def _html(self, post):

View File

@@ -53,8 +53,7 @@ class MyportfolioGalleryExtractor(Extractor):
for data["num"], url in enumerate(imgs, 1):
yield Message.Url, url, text.nameext_from_url(url, data)
@staticmethod
def metadata(page):
def metadata(self, page):
"""Collect general image metadata"""
# og:title contains data as "<user> - <title>", but both
# <user> and <title> can contain a "-" as well, so we get the title
@@ -81,8 +80,7 @@ class MyportfolioGalleryExtractor(Extractor):
"description": text.unescape(descr),
}
@staticmethod
def images(page):
def images(self, page):
"""Extract and return a list of all image-urls"""
return (
list(text.extract_iter(page, 'js-lightbox" data-src="', '"')) or

View File

@@ -54,8 +54,7 @@ class NaverwebtoonEpisodeExtractor(NaverwebtoonBase, GalleryExtractor):
extr('"painters":[', ']'), '"name":"', '"')]
}
@staticmethod
def images(page):
def images(self, page):
view_area = text.extr(page, 'id="comic_view_area"', '</div>')
return [
(url, None)

View File

@@ -260,8 +260,7 @@ class NewgroundsExtractor(Extractor):
else:
yield {"image": url}
@staticmethod
def _extract_audio_data(extr, url):
def _extract_audio_data(self, extr, url):
index = url.split("/")[5]
return {
"title" : text.unescape(extr('"og:title" content="', '"')),
@@ -529,8 +528,7 @@ class NewgroundsFollowingExtractor(NewgroundsFavoriteExtractor):
for url in self._pagination_favorites(kind, pnum):
yield Message.Queue, url, data
@staticmethod
def _extract_favorites(page):
def _extract_favorites(self, page):
return [
text.ensure_http_scheme(user.rpartition('"')[2])
for user in text.extract_iter(page, 'class="item-user', '"><img')

View File

@@ -73,8 +73,7 @@ class NijieExtractor(AsynchronousMixin, BaseExtractor):
def image_ids(self):
"""Collect all relevant image-ids"""
@staticmethod
def _extract_data(page):
def _extract_data(self, page):
"""Extract image metadata from 'page'"""
extr = text.extract_from(page)
keywords = text.unescape(extr(
@@ -90,8 +89,7 @@ class NijieExtractor(AsynchronousMixin, BaseExtractor):
"tags" : keywords[2:-1],
}
@staticmethod
def _extract_data_horne(page):
def _extract_data_horne(self, page):
"""Extract image metadata from 'page'"""
extr = text.extract_from(page)
keywords = text.unescape(extr(
@@ -124,8 +122,7 @@ class NijieExtractor(AsynchronousMixin, BaseExtractor):
# do NOT use text.extr() here, as it doesn't support a pos argument
return (text.extract(page, 'itemprop="image" src="', '"', pos)[0],)
@staticmethod
def _extract_user_name(page):
def _extract_user_name(self, page):
return text.unescape(text.extr(page, "<br />", "<"))
def login(self):
@@ -248,8 +245,7 @@ class NijieNuitaExtractor(NijieExtractor):
data["user_name"] = self.user_name
return data
@staticmethod
def _extract_user_name(page):
def _extract_user_name(self, page):
return text.unescape(text.extr(page, "<title>", "さんの抜いた"))
@@ -262,8 +258,7 @@ class NijieFeedExtractor(NijieExtractor):
def image_ids(self):
return self._pagination("like_user_view")
@staticmethod
def _extract_user_name(page):
def _extract_user_name(self, page):
return ""

View File

@@ -98,8 +98,7 @@ class NozomiExtractor(Extractor):
def metadata(self):
return {}
@staticmethod
def _list(src):
def _list(self, src):
return [x["tagname_display"] for x in src] if src else ()

View File

@@ -70,13 +70,11 @@ class NsfwalbumAlbumExtractor(GalleryExtractor):
self.root, image_id, spirit),),
}
@staticmethod
def _validate_response(response):
def _validate_response(self, response):
return not response.url.endswith(
("/no_image.jpg", "/placeholder.png", "/error.jpg"))
@staticmethod
def _annihilate(value, base=6):
def _annihilate(self, value, base=6):
return "".join(
chr(ord(char) ^ base)
for char in value

View File

@@ -108,8 +108,7 @@ class PahealTagExtractor(PahealExtractor):
return
pnum += 1
@staticmethod
def _extract_data(post):
def _extract_data(self, post):
pid , pos = text.extract(post, "", "'")
data, pos = text.extract(post, "title='", "'", pos)
md5 , pos = text.extract(post, "/_thumbs/", "/", pos)

View File

@@ -188,16 +188,14 @@ class PatreonExtractor(Extractor):
return attr
@staticmethod
def _transform(included):
def _transform(self, included):
"""Transform 'included' into an easier to handle format"""
result = collections.defaultdict(dict)
for inc in included:
result[inc["type"]][inc["id"]] = inc["attributes"]
return result
@staticmethod
def _files(post, included, key):
def _files(self, post, included, key):
"""Build a list of files"""
files = post["relationships"].get(key)
if files and files.get("data"):
@@ -226,8 +224,7 @@ class PatreonExtractor(Extractor):
cd = response.headers.get("Content-Disposition")
return text.extr(cd, 'filename="', '"')
@staticmethod
def _filehash(url):
def _filehash(self, url):
"""Extract MD5 hash from a download URL"""
parts = url.partition("?")[0].split("/")
parts.reverse()
@@ -237,8 +234,7 @@ class PatreonExtractor(Extractor):
return part
return ""
@staticmethod
def _build_url(endpoint, query):
def _build_url(self, endpoint, query):
return (
"https://www.patreon.com/api/" + endpoint +

View File

@@ -35,8 +35,7 @@ class PhilomenaExtractor(BooruExtractor):
return url.rpartition(".")[0] + ".svg"
return url
@staticmethod
def _prepare(post):
def _prepare(self, post):
post["date"] = text.parse_datetime(
post["created_at"][:19], "%Y-%m-%dT%H:%M:%S")

View File

@@ -336,8 +336,7 @@ class PixivExtractor(Extractor):
if fmt in urls:
yield urls[fmt]
@staticmethod
def _date_from_url(url, offset=timedelta(hours=9)):
def _date_from_url(self, url, offset=timedelta(hours=9)):
try:
_, _, _, _, _, y, m, d, H, M, S, _ = url.split("/")
return datetime(
@@ -345,8 +344,7 @@ class PixivExtractor(Extractor):
except Exception:
return None
@staticmethod
def _make_work(kind, url, user):
def _make_work(self, kind, url, user):
p = url.split("/")
return {
"create_date" : "{}-{}-{}T{}:{}:{}+09:00".format(

View File

@@ -28,8 +28,7 @@ class PlurkExtractor(Extractor):
def plurks(self):
"""Return an iterable with all relevant 'plurk' objects"""
@staticmethod
def _urls(obj):
def _urls(self, obj):
"""Extract URLs from a 'plurk' object"""
return text.extract_iter(obj["content"], ' href="', '"')
@@ -59,8 +58,7 @@ class PlurkExtractor(Extractor):
del data["count"]
data["from_response_id"] = info["responses"][-1]["id"] + 1
@staticmethod
def _load(data):
def _load(self, data):
if not data:
raise exception.NotFoundError("user")
return util.json_loads(

View File

@@ -573,8 +573,7 @@ class RedditAPI():
sid = self.extractor.config(key)
return self._decode(sid.rpartition("_")[2].lower()) if sid else default
@staticmethod
def _decode(sid):
def _decode(self, sid):
return util.bdecode(sid, "0123456789abcdefghijklmnopqrstuvwxyz")

View File

@@ -63,20 +63,17 @@ class SankakucomplexArticleExtractor(SankakucomplexExtractor):
file.update(data)
yield Message.Url, url, file
@staticmethod
def _extract_images(content):
def _extract_images(self, content):
orig_sub = util.re(r"-\d+x\d+\.").sub
return [
orig_sub(".", url) for url in
util.unique(text.extract_iter(content, 'data-lazy-src="', '"'))
]
@staticmethod
def _extract_videos(content):
def _extract_videos(self, content):
return util.re(r"<source [^>]*src=[\"']([^\"']+)").findall(content)
@staticmethod
def _extract_embeds(content):
def _extract_embeds(self, content):
return [
"ytdl:" + url for url in
util.re(r"<iframe [^>]*src=[\"']([^\"']+)").findall(content)

View File

@@ -234,14 +234,12 @@ class SmugmugAPI(oauth.OAuth1API):
return
params["start"] += params["count"]
@staticmethod
def _extend(endpoint, expands):
def _extend(self, endpoint, expands):
if expands:
endpoint += "?_expand=" + expands
return endpoint
@staticmethod
def _apply_expansions(data, expands):
def _apply_expansions(self, data, expands):
def unwrap(response):
locator = response["Locator"]

View File

@@ -58,8 +58,7 @@ class SzurubooruExtractor(booru.BooruExtractor):
url = self.root + "/" + url
return url
@staticmethod
def _prepare(post):
def _prepare(self, post):
post["date"] = text.parse_datetime(
post["creationTime"], "%Y-%m-%dT%H:%M:%S.%fZ")

View File

@@ -158,8 +158,7 @@ class TsuminoSearchExtractor(TsuminoBase, Extractor):
raise exception.StopExtraction(
"Invalid search query '%s' (%s)", query, exc)
@staticmethod
def _parse_simple(query):
def _parse_simple(self, query):
"""Parse search query with format '?<key>=value>'"""
key, _, value = query.partition("=")
tag_types = {
@@ -179,8 +178,7 @@ class TsuminoSearchExtractor(TsuminoBase, Extractor):
"Tags[0][Exclude]": "false",
}
@staticmethod
def _parse_jsurl(data):
def _parse_jsurl(self, data):
"""Parse search query in JSURL format
Nested lists and dicts are handled in a special way to deal

View File

@@ -198,14 +198,12 @@ class TumblrExtractor(Extractor):
"', '".join(sorted(invalid)))
return types
@staticmethod
def _prepare(url, post):
def _prepare(self, url, post):
text.nameext_from_url(url, post)
post["hash"] = post["filename"].partition("_")[2]
return Message.Url, url, post
@staticmethod
def _prepare_image(url, post):
def _prepare_image(self, url, post):
text.nameext_from_url(url, post)
# try ".gifv" (#3095)
@@ -226,8 +224,7 @@ class TumblrExtractor(Extractor):
return Message.Url, url, post
@staticmethod
def _prepare_avatar(url, post, blog):
def _prepare_avatar(self, url, post, blog):
text.nameext_from_url(url, post)
post["num"] = post["count"] = 1
post["blog"] = blog
@@ -298,8 +295,7 @@ class TumblrPostExtractor(TumblrExtractor):
def posts(self):
return self.api.posts(self.blog, {"id": self.post_id})
@staticmethod
def _setup_posttypes():
def _setup_posttypes(self):
return POST_TYPES

View File

@@ -20,13 +20,11 @@ class TumblrgalleryExtractor(GalleryExtractor):
root = "https://tumblrgallery.xyz"
referer = False
@staticmethod
def _urls_from_page(page):
def _urls_from_page(self, page):
return text.extract_iter(
page, '<div class="report"> <a class="xx-co-me" href="', '"')
@staticmethod
def _data_from_url(url):
def _data_from_url(self, url):
filename = text.nameext_from_url(url)["filename"]
parts = filename.split("_")
try:

View File

@@ -36,8 +36,7 @@ class TwibooruExtractor(BooruExtractor):
return post["view_url"].rpartition(".")[0] + ".svg"
return post["view_url"]
@staticmethod
def _prepare(post):
def _prepare(self, post):
post["date"] = text.parse_datetime(
post["created_at"], "%Y-%m-%dT%H:%M:%S.%fZ")

View File

@@ -48,8 +48,7 @@ class UnsplashExtractor(Extractor):
yield Message.Directory, photo
yield Message.Url, url, photo
@staticmethod
def metadata():
def metadata(self):
return None
def skip(self, num):

View File

@@ -73,8 +73,7 @@ class VichanThreadExtractor(VichanExtractor):
self.root, post["board"], post["tim"], post["ext"])
return Message.Url, post["url"], post
@staticmethod
def _process_8kun(post, data):
def _process_8kun(self, post, data):
post.update(data)
post["extension"] = post["ext"][1:]

View File

@@ -109,8 +109,7 @@ class VscoExtractor(Extractor):
yield from medias
params["page"] += 1
@staticmethod
def _transform_media(media):
def _transform_media(self, media):
if "responsiveUrl" not in media:
return None
media["_id"] = media["id"]
@@ -122,8 +121,7 @@ class VscoExtractor(Extractor):
media["image_meta"] = media.get("imageMeta")
return media
@staticmethod
def _transform_video(media):
def _transform_video(self, media):
media["is_video"] = True
media["grid_name"] = ""
media["video_url"] = media["playback_url"]

View File

@@ -39,8 +39,7 @@ class WallhavenExtractor(Extractor):
"""Return general metadata"""
return ()
@staticmethod
def _transform(wp):
def _transform(self, wp):
wp["url"] = wp.pop("path")
if "tags" in wp:
wp["tags"] = [t["name"] for t in wp["tags"]]

View File

@@ -20,8 +20,7 @@ class WeasylExtractor(Extractor):
root = "https://www.weasyl.com"
useragent = util.USERAGENT
@staticmethod
def populate_submission(data):
def populate_submission(self, data):
# Some submissions don't have content and can be skipped
if "submission" in data["media"]:
data["url"] = data["media"]["submission"][0]["url"]

View File

@@ -52,8 +52,7 @@ class WikimediaExtractor(BaseExtractor):
return url
raise exception.StopExtraction("Unable to find API endpoint")
@staticmethod
def prepare(image):
def prepare(self, image):
"""Adjust the content of an image object"""
image["metadata"] = {
m["name"]: m["value"]

View File

@@ -463,8 +463,7 @@ class Literal():
# __getattr__, __getattribute__, and __class_getitem__
# are all slower than regular __getitem__
@staticmethod
def __getitem__(key):
def __getitem__(self, key):
return key

View File

@@ -682,8 +682,7 @@ class DownloadJob(Job):
for hook, callback in hooks.items():
self.hooks[hook].append(callback)
@staticmethod
def _call_hook(callback, condition, pathfmt):
def _call_hook(self, callback, condition, pathfmt):
if condition(pathfmt.kwdict):
callback(pathfmt)
@@ -818,12 +817,10 @@ class UrlJob(Job):
if depth >= self.maxdepth:
self.handle_queue = self.handle_url
@staticmethod
def handle_url(url, _):
def handle_url(self, url, _):
stdout_write(url + "\n")
@staticmethod
def handle_url_fallback(url, kwdict):
def handle_url_fallback(self, url, kwdict):
stdout_write(url + "\n")
if "_fallback" in kwdict:
for url in kwdict["_fallback"]:

View File

@@ -483,8 +483,7 @@ class CustomOutput():
self._fmt_progress_total = (options.get("progress-total") or
"\r{3:>3}% {0:>7}B {1:>7}B/s ").format
@staticmethod
def _make_func(shorten, format_string, limit):
def _make_func(self, shorten, format_string, limit):
fmt = format_string.format
return lambda txt: fmt(shorten(txt, limit, CHAR_ELLIPSIES))

View File

@@ -183,8 +183,7 @@ class PathFormat():
return self.check_file()
return False
@staticmethod
def check_file():
def check_file(self):
return True
def _enum_file(self):

View File

@@ -62,12 +62,10 @@ class ComparePP(PostProcessor):
def _compare(self, f1, f2):
return self._compare_size(f1, f2) and self._compare_content(f1, f2)
@staticmethod
def _compare_size(f1, f2):
def _compare_size(self, f1, f2):
return os.stat(f1).st_size == os.stat(f2).st_size
@staticmethod
def _compare_content(f1, f2):
def _compare_content(self, f1, f2):
size = 16384
with open(f1, "rb") as fp1, open(f2, "rb") as fp2:
while True:

View File

@@ -268,8 +268,7 @@ class MetadataPP(PostProcessor):
if not private:
return util.filter_dict
@staticmethod
def _make_encoder(options, indent=None):
def _make_encoder(self, options, indent=None):
return json.JSONEncoder(
ensure_ascii=options.get("ascii", False),
sort_keys=options.get("sort", False),

View File

@@ -425,15 +425,13 @@ class UgoiraPP(PostProcessor):
return (None, None)
@staticmethod
def _delay_gcd(frames):
def _delay_gcd(self, frames):
result = frames[0]["delay"]
for f in frames:
result = gcd(result, f["delay"])
return result
@staticmethod
def _delay_is_uniform(frames):
def _delay_is_uniform(self, frames):
delay = frames[0]["delay"]
for f in frames:
if f["delay"] != delay:

View File

@@ -684,8 +684,7 @@ class CustomNone():
def __call__(self, *args, **kwargs):
return self
@staticmethod
def __next__():
def __next__(self):
raise StopIteration
def __eq__(self, other):
@@ -733,20 +732,17 @@ class CustomNone():
__abs__ = identity
__invert__ = identity
@staticmethod
def __len__():
def __len__(self):
return 0
__int__ = __len__
__hash__ = __len__
__index__ = __len__
@staticmethod
def __format__(_):
def __format__(self, _):
return "None"
@staticmethod
def __str__():
def __str__(self):
return "None"
__repr__ = __str__
@@ -1052,8 +1048,7 @@ class RangePredicate():
return True
return False
@staticmethod
def _parse(rangespec):
def _parse(self, rangespec):
"""Parse an integer range string and return the resulting ranges
Examples:

View File

@@ -223,8 +223,7 @@ class TestConfigFiles(unittest.TestCase):
self.assertIsInstance(cfg, dict)
self.assertTrue(cfg)
@staticmethod
def _load(name):
def _load(self, name):
path = os.path.join(ROOTDIR, "docs", name)
try:
with open(path) as fp:

View File

@@ -296,8 +296,7 @@ class TestExtractorWait(unittest.TestCase):
u = self._isotime_to_seconds(until.time().isoformat()[:8])
self.assertLessEqual(o-u, 1.0)
@staticmethod
def _isotime_to_seconds(isotime):
def _isotime_to_seconds(self, isotime):
parts = isotime.split(":")
return int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])

View File

@@ -713,8 +713,7 @@ class MetadataTest(BasePostprocessorTest):
}
""")
@staticmethod
def _output(mock):
def _output(self, mock):
return "".join(
call[1][0]
for call in mock.mock_calls

View File

@@ -28,11 +28,18 @@ from gallery_dl import util, text, exception # noqa E402
class TestRange(unittest.TestCase):
def test_parse_empty(self, f=util.RangePredicate._parse):
def setUp(self):
self.predicate = util.RangePredicate("")
def test_parse_empty(self):
f = self.predicate._parse
self.assertEqual(f(""), [])
self.assertEqual(f([]), [])
def test_parse_digit(self, f=util.RangePredicate._parse):
def test_parse_digit(self):
f = self.predicate._parse
self.assertEqual(f("2"), [range(2, 3)])
self.assertEqual(
@@ -42,7 +49,9 @@ class TestRange(unittest.TestCase):
range(4, 5)],
)
def test_parse_range(self, f=util.RangePredicate._parse):
def test_parse_range(self):
f = self.predicate._parse
self.assertEqual(f("1-2"), [range(1, 3)])
self.assertEqual(f("2-"), [range(2, sys.maxsize)])
self.assertEqual(f("-3"), [range(1, 4)])
@@ -62,7 +71,9 @@ class TestRange(unittest.TestCase):
range(2, 7)],
)
def test_parse_slice(self, f=util.RangePredicate._parse):
def test_parse_slice(self):
f = self.predicate._parse
self.assertEqual(f("2:4") , [range(2, 4)])
self.assertEqual(f("3::") , [range(3, sys.maxsize)])
self.assertEqual(f(":4:") , [range(1, 4)])