From bae874f37057fabbba9b6345fcd5bb7c913193fd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mike=20F=C3=A4hrmann?= <mike_faehrmann@web.de>
Date: Tue, 2 Mar 2021 22:55:45 +0100
Subject: [PATCH] replace 'wait-min/-max' with 'sleep-request'
on exhentai, idolcomplex, reactor
---
docs/configuration.rst | 26 --------------------------
gallery_dl/extractor/exhentai.py | 23 ++---------------------
gallery_dl/extractor/idolcomplex.py | 15 ++-------------
gallery_dl/extractor/reactor.py | 11 +----------
4 files changed, 5 insertions(+), 70 deletions(-)
diff --git a/docs/configuration.rst b/docs/configuration.rst
index 62cd4de9..3629c948 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -949,21 +949,6 @@ Description
Download full-sized original images if available.
-extractor.exhentai.wait-min & .wait-max
----------------------------------------
-Type
- ``float``
-Default
- ``3.0`` and ``6.0``
-Description
- Minimum and maximum wait time in seconds between each image
-
- ExHentai detects and blocks automated downloaders.
- *gallery-dl* waits a randomly selected number of
- seconds between ``wait-min`` and ``wait-max`` after
- each image to prevent getting blocked.
-
-
extractor.flickr.access-token & .access-token-secret
----------------------------------------------------
Type
@@ -1353,17 +1338,6 @@ Description
Also search Plurk comments for URLs.
-extractor.reactor.wait-min & .wait-max
---------------------------------------
-Type
- ``float``
-Default
- ``3.0`` and ``6.0``
-Description
- Minimum and maximum wait time in seconds between HTTP requests
- during the extraction process.
-
-
extractor.readcomiconline.captcha
---------------------------------
Type
diff --git a/gallery_dl/extractor/exhentai.py b/gallery_dl/extractor/exhentai.py
index d1f6ec25..5a7de238 100644
--- a/gallery_dl/extractor/exhentai.py
+++ b/gallery_dl/extractor/exhentai.py
@@ -12,8 +12,6 @@ from .common import Extractor, Message
from .. import text, util, exception
from ..cache import cache
import itertools
-import random
-import time
import math
BASE_PATTERN = r"(?:https?://)?(e[x-]|g\.e-)hentai\.org"
@@ -29,6 +27,7 @@ class ExhentaiExtractor(Extractor):
cookienames = ("ipb_member_id", "ipb_pass_hash")
cookiedomain = ".exhentai.org"
root = "https://exhentai.org"
+ request_interval = 5.0
LIMIT = False
@@ -46,8 +45,6 @@ class ExhentaiExtractor(Extractor):
Extractor.__init__(self, match)
self.limits = self.config("limits", True)
self.original = self.config("original", True)
- self.wait_min = self.config("wait-min", 3)
- self.wait_max = self.config("wait-max", 6)
if type(self.limits) is int:
self._limit_max = self.limits
@@ -56,8 +53,6 @@ class ExhentaiExtractor(Extractor):
self._limit_max = 0
self._remaining = 0
- if self.wait_max < self.wait_min:
- self.wait_max = self.wait_min
self.session.headers["Referer"] = self.root + "/"
if version != "ex":
self.session.cookies.set("nw", "1", domain=self.cookiedomain)
@@ -69,14 +64,6 @@ class ExhentaiExtractor(Extractor):
raise exception.AuthorizationError()
return response
- def wait(self, waittime=None):
- """Wait for a randomly chosen amount of seconds"""
- if not waittime:
- waittime = random.uniform(self.wait_min, self.wait_max)
- else:
- waittime = random.uniform(waittime * 0.66, waittime * 1.33)
- time.sleep(waittime)
-
def login(self):
"""Login and set necessary cookies"""
if self.LIMIT:
@@ -200,7 +187,6 @@ class ExhentaiGalleryExtractor(ExhentaiExtractor):
self.log.error("Failed to extract initial image token")
self.log.debug("Page content:\n%s", gpage)
return
- self.wait()
ipage = self._image_page()
else:
ipage = self._image_page()
@@ -210,7 +196,6 @@ class ExhentaiGalleryExtractor(ExhentaiExtractor):
self.log.debug("Page content:\n%s", ipage)
return
self.gallery_token = part.split("/")[1]
- self.wait()
gpage = self._gallery_page()
data = self.get_metadata(gpage)
@@ -225,7 +210,6 @@ class ExhentaiGalleryExtractor(ExhentaiExtractor):
self._check_limits(data)
if "/fullimg.php" in url:
data["extension"] = ""
- self.wait(self.wait_max / 4)
yield Message.Url, url, data
def get_metadata(self, page):
@@ -322,7 +306,6 @@ class ExhentaiGalleryExtractor(ExhentaiExtractor):
"showkey": self.key["show"],
}
for request["page"] in range(self.image_num + 1, self.count + 1):
- self.wait()
page = self.request(api_url, method="POST", json=request).json()
imgkey = nextkey
nextkey, pos = text.extract(page["i3"], "'", "'")
@@ -372,7 +355,7 @@ class ExhentaiGalleryExtractor(ExhentaiExtractor):
return page
def _check_limits(self, data):
- if not self._remaining or data["num"] % 20 == 0:
+ if not self._remaining or data["num"] % 25 == 0:
self._update_limits()
self._remaining -= data["cost"]
@@ -455,7 +438,6 @@ class ExhentaiSearchExtractor(ExhentaiExtractor):
def items(self):
self.login()
- yield Message.Version, 1
data = {"_extractor": ExhentaiGalleryExtractor}
while True:
@@ -472,7 +454,6 @@ class ExhentaiSearchExtractor(ExhentaiExtractor):
if 'class="ptdd">><' in page or ">No hits found</p>" in page:
return
self.params["page"] += 1
- self.wait()
class ExhentaiFavoriteExtractor(ExhentaiSearchExtractor):
diff --git a/gallery_dl/extractor/idolcomplex.py b/gallery_dl/extractor/idolcomplex.py
index 16fe0a07..3d4bcfb1 100644
--- a/gallery_dl/extractor/idolcomplex.py
+++ b/gallery_dl/extractor/idolcomplex.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-# Copyright 2018-2020 Mike Fährmann
+# Copyright 2018-2021 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -13,8 +13,6 @@ from .common import Message
from ..cache import cache
from .. import text, util, exception
import collections
-import random
-import time
import re
@@ -24,6 +22,7 @@ class IdolcomplexExtractor(SankakuExtractor):
cookienames = ("login", "pass_hash")
cookiedomain = "idol.sankakucomplex.com"
root = "https://" + cookiedomain
+ request_interval = 5.0
def __init__(self, match):
SankakuExtractor.__init__(self, match)
@@ -31,17 +30,12 @@ class IdolcomplexExtractor(SankakuExtractor):
self.start_page = 1
self.start_post = 0
self.extags = self.config("tags", False)
- self.wait_min = self.config("wait-min", 3.0)
- self.wait_max = self.config("wait-max", 6.0)
- if self.wait_max < self.wait_min:
- self.wait_max = self.wait_min
def items(self):
self.login()
data = self.metadata()
for post_id in util.advance(self.post_ids(), self.start_post):
- self.wait()
post = self._parse_post(post_id)
url = post["file_url"]
post.update(data)
@@ -130,10 +124,6 @@ class IdolcomplexExtractor(SankakuExtractor):
return data
- def wait(self):
- """Wait for a randomly chosen amount of seconds"""
- time.sleep(random.uniform(self.wait_min, self.wait_max))
-
class IdolcomplexTagExtractor(IdolcomplexExtractor):
"""Extractor for images from idol.sankakucomplex.com by search-tags"""
@@ -192,7 +182,6 @@ class IdolcomplexTagExtractor(IdolcomplexExtractor):
params["page"] = self.start_page
while True:
- self.wait()
page = self.request(self.root, params=params, retries=10).text
pos = page.find("<div id=more-popular-posts-link>") + 1
diff --git a/gallery_dl/extractor/reactor.py b/gallery_dl/extractor/reactor.py
index 8336e3ad..971347b9 100644
--- a/gallery_dl/extractor/reactor.py
+++ b/gallery_dl/extractor/reactor.py
@@ -11,11 +11,8 @@
from .common import Extractor, Message
from .. import text
import urllib.parse
-import random
-import time
import json
-
BASE_PATTERN = r"(?:https?://)?((?:[^/.]+\.)?reactor\.cc)"
@@ -25,17 +22,13 @@ class ReactorExtractor(Extractor):
filename_fmt = "{post_id}_{num:>02}{title[:100]:?_//}.{extension}"
archive_fmt = "{post_id}_{num}"
instances = ()
+ request_interval = 5.0
def __init__(self, match):
Extractor.__init__(self, match)
self.root = "http://" + match.group(1)
self.session.headers["Referer"] = self.root
- self.wait_min = self.config("wait-min", 3)
- self.wait_max = self.config("wait-max", 6)
- if self.wait_max < self.wait_min:
- self.wait_max = self.wait_min
-
if not self.category:
# set category based on domain name
netloc = urllib.parse.urlsplit(self.root).netloc
@@ -61,8 +54,6 @@ class ReactorExtractor(Extractor):
def _pagination(self, url):
while True:
- time.sleep(random.uniform(self.wait_min, self.wait_max))
-
response = self.request(url)
if response.history:
# sometimes there is a redirect from