The page you're looking for cannot be found.<" in content):
- raise exception.NotFoundError("page")
+ raise self.exc.NotFoundError("page")
self.request = Extractor.request.__get__(self)
return response
diff --git a/gallery_dl/extractor/myhentaigallery.py b/gallery_dl/extractor/myhentaigallery.py
index b94a73f4..adf13845 100644
--- a/gallery_dl/extractor/myhentaigallery.py
+++ b/gallery_dl/extractor/myhentaigallery.py
@@ -7,7 +7,7 @@
"""Extractors for https://myhentaigallery.com/"""
from .common import Extractor, GalleryExtractor, Message
-from .. import text, exception
+from .. import text
BASE_PATTERN = r"(?:https?://)?myhentaigallery\.com"
@@ -40,7 +40,7 @@ class MyhentaigalleryGalleryExtractor(MyhentaigalleryBase, GalleryExtractor):
title = title[4:]
if not title:
- raise exception.NotFoundError("gallery")
+ raise self.exc.NotFoundError("gallery")
return {
"title" : text.unescape(title),
diff --git a/gallery_dl/extractor/myportfolio.py b/gallery_dl/extractor/myportfolio.py
index 3a211228..faff4a37 100644
--- a/gallery_dl/extractor/myportfolio.py
+++ b/gallery_dl/extractor/myportfolio.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.myportfolio.com/"""
from .common import Extractor, Message
-from .. import text, exception
+from .. import text
class MyportfolioGalleryExtractor(Extractor):
@@ -34,7 +34,7 @@ class MyportfolioGalleryExtractor(Extractor):
url = "https://" + self.domain + (self.path or "")
response = self.request(url)
if response.history and response.url.endswith(".adobe.com/missing"):
- raise exception.NotFoundError()
+ raise self.exc.NotFoundError()
page = response.text
projects = text.extr(
@@ -72,7 +72,7 @@ class MyportfolioGalleryExtractor(Extractor):
elif user:
user, _, title = user.partition(" - ")
else:
- raise exception.NotFoundError()
+ raise self.exc.NotFoundError()
return {
"user": text.unescape(user),
diff --git a/gallery_dl/extractor/newgrounds.py b/gallery_dl/extractor/newgrounds.py
index 9a8be8ab..1aac4e33 100644
--- a/gallery_dl/extractor/newgrounds.py
+++ b/gallery_dl/extractor/newgrounds.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.newgrounds.com/"""
from .common import Extractor, Message, Dispatch
-from .. import text, util, dt, exception
+from .. import text, util, dt
from ..cache import cache
import itertools
@@ -143,7 +143,7 @@ class NewgroundsExtractor(Extractor):
if result.get("success"):
break
if "errors" in result:
- raise exception.AuthenticationError(
+ raise self.exc.AuthenticationError(
'"' + '", "'.join(result["errors"]) + '"')
if result.get("requiresMfa"):
@@ -370,7 +370,7 @@ class NewgroundsExtractor(Extractor):
return
if "errors" in data:
msg = ", ".join(text.unescape(e) for e in data["errors"])
- raise exception.AbortExtraction(msg)
+ raise self.exc.AbortExtraction(msg)
items = data.get("items")
if not items:
diff --git a/gallery_dl/extractor/nijie.py b/gallery_dl/extractor/nijie.py
index 181d54d6..70b4f48f 100644
--- a/gallery_dl/extractor/nijie.py
+++ b/gallery_dl/extractor/nijie.py
@@ -9,7 +9,7 @@
"""Extractors for nijie instances"""
from .common import BaseExtractor, Message, Dispatch, AsynchronousMixin
-from .. import text, dt, exception
+from .. import text, dt
from ..cache import cache
@@ -134,7 +134,7 @@ class NijieExtractor(AsynchronousMixin, BaseExtractor):
if username:
return self.cookies_update(self._login_impl(username, password))
- raise exception.AuthenticationError("Username and password required")
+ raise self.exc.AuthenticationError("Username and password required")
@cache(maxage=90*86400, keyarg=1)
def _login_impl(self, username, password):
@@ -145,7 +145,7 @@ class NijieExtractor(AsynchronousMixin, BaseExtractor):
response = self.request(url, method="POST", data=data)
if "/login.php" in response.text:
- raise exception.AuthenticationError()
+ raise self.exc.AuthenticationError()
return self.cookies
def _pagination(self, path):
diff --git a/gallery_dl/extractor/oauth.py b/gallery_dl/extractor/oauth.py
index 38bb5eeb..c7592d2b 100644
--- a/gallery_dl/extractor/oauth.py
+++ b/gallery_dl/extractor/oauth.py
@@ -9,7 +9,7 @@
"""Utility classes to setup OAuth and link accounts to gallery-dl"""
from .common import Extractor
-from .. import text, oauth, util, config, exception
+from .. import text, oauth, util, config
from ..output import stdout_write
from ..cache import cache, memcache
@@ -74,7 +74,7 @@ class OAuthBase(Extractor):
msg = "Received invalid"
if exc:
exc = f" ({exc.__class__.__name__}: {exc})"
- raise exception.AbortExtraction(f"{msg} OAuth response{exc}")
+ raise self.exc.AbortExtraction(f"{msg} OAuth response{exc}")
def send(self, msg):
"""Send 'msg' to the socket opened in 'recv()'"""
@@ -396,7 +396,7 @@ class OAuthMastodon(OAuthBase):
data = self.request_json(url, method="POST", data=data)
if "client_id" not in data or "client_secret" not in data:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"Failed to register new application: '{data}'")
data["client-id"] = data.pop("client_id")
diff --git a/gallery_dl/extractor/paheal.py b/gallery_dl/extractor/paheal.py
index be22dea5..d533e76a 100644
--- a/gallery_dl/extractor/paheal.py
+++ b/gallery_dl/extractor/paheal.py
@@ -9,7 +9,7 @@
"""Extractors for https://rule34.paheal.net/"""
from .common import Extractor, Message
-from .. import text, exception
+from .. import text
class PahealExtractor(Extractor):
@@ -98,7 +98,7 @@ class PahealTagExtractor(PahealExtractor):
while True:
try:
page = self.request(base + str(pnum)).text
- except exception.HttpError as exc:
+ except self.exc.HttpError as exc:
if exc.status == 404:
return
raise
@@ -152,7 +152,7 @@ class PahealPostExtractor(PahealExtractor):
def get_posts(self):
try:
return (self._extract_post(self.groups[0]),)
- except exception.HttpError as exc:
+ except self.exc.HttpError as exc:
if exc.status == 404:
return ()
raise
diff --git a/gallery_dl/extractor/patreon.py b/gallery_dl/extractor/patreon.py
index 2635c4b3..cc58c6c9 100644
--- a/gallery_dl/extractor/patreon.py
+++ b/gallery_dl/extractor/patreon.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.patreon.com/"""
from .common import Extractor, Message
-from .. import text, util, dt, exception
+from .. import text, util, dt
from ..cache import memcache
import collections
import itertools
@@ -347,7 +347,7 @@ class PatreonExtractor(Extractor):
except Exception:
pass
- raise exception.AbortExtraction("Unable to extract bootstrap data")
+ raise self.exc.AbortExtraction("Unable to extract bootstrap data")
class PatreonCollectionExtractor(PatreonExtractor):
@@ -428,12 +428,12 @@ class PatreonCreatorExtractor(PatreonExtractor):
data = None
data = self._extract_bootstrap(page)
return data["campaign"]["data"]["id"]
- except exception.ControlException:
+ except self.exc.ControlException:
pass
except Exception as exc:
if data:
self.log.debug(data)
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"Unable to extract campaign ID "
f"({exc.__class__.__name__}: {exc})")
@@ -442,7 +442,7 @@ class PatreonCreatorExtractor(PatreonExtractor):
page, r'{\"value\":{\"campaign\":{\"data\":{\"id\":\"', '\\"'):
return cid
- raise exception.AbortExtraction("Failed to extract campaign ID")
+ raise self.exc.AbortExtraction("Failed to extract campaign ID")
def _get_filters(self, params):
return "".join(
diff --git a/gallery_dl/extractor/pexels.py b/gallery_dl/extractor/pexels.py
index 5f4ebc72..68537528 100644
--- a/gallery_dl/extractor/pexels.py
+++ b/gallery_dl/extractor/pexels.py
@@ -9,7 +9,7 @@
"""Extractors for https://pexels.com/"""
from .common import Extractor, Message
-from .. import text, exception
+from .. import text
BASE_PATTERN = r"(?:https?://)?(?:www\.)?pexels\.com"
@@ -174,7 +174,7 @@ class PexelsAPI():
else:
self.extractor.log.debug(response.text)
- raise exception.AbortExtraction("API request failed")
+ raise self.exc.AbortExtraction("API request failed")
def _pagination(self, endpoint, params):
while True:
diff --git a/gallery_dl/extractor/philomena.py b/gallery_dl/extractor/philomena.py
index 2f52b9ab..74330c44 100644
--- a/gallery_dl/extractor/philomena.py
+++ b/gallery_dl/extractor/philomena.py
@@ -9,7 +9,7 @@
"""Extractors for Philomena sites"""
from .booru import BooruExtractor
-from .. import text, exception
+from .. import text
class PhilomenaExtractor(BooruExtractor):
@@ -113,7 +113,7 @@ class PhilomenaGalleryExtractor(PhilomenaExtractor):
try:
return {"gallery": self.api.gallery(self.groups[-1])}
except IndexError:
- raise exception.NotFoundError("gallery")
+ raise self.exc.NotFoundError("gallery")
def posts(self):
gallery_id = "gallery_id:" + self.groups[-1]
@@ -159,7 +159,7 @@ class PhilomenaAPI():
# error
self.extractor.log.debug(response.content)
- raise exception.HttpError("", response)
+ raise self.exc.HttpError("", response)
def _pagination(self, endpoint, params):
extr = self.extractor
diff --git a/gallery_dl/extractor/pholder.py b/gallery_dl/extractor/pholder.py
index 12e150d4..03ed9e0d 100644
--- a/gallery_dl/extractor/pholder.py
+++ b/gallery_dl/extractor/pholder.py
@@ -7,7 +7,7 @@
"""Extractors for https://pholder.com/"""
from .common import Extractor, Message
-from .. import text, util, exception
+from .. import text, util
BASE_PATTERN = r"(?:https?://)?(?:www\.)?pholder\.com"
@@ -50,7 +50,7 @@ class PholderExtractor(Extractor):
except ValueError:
pass
- raise exception.AbortExtraction("Could not locate window.data JSON.")
+ raise self.exc.AbortExtraction("Could not locate window.data JSON.")
def _posts(self, page_url):
params = {"page": 1}
diff --git a/gallery_dl/extractor/pillowfort.py b/gallery_dl/extractor/pillowfort.py
index 570ed85a..f99d3d61 100644
--- a/gallery_dl/extractor/pillowfort.py
+++ b/gallery_dl/extractor/pillowfort.py
@@ -10,7 +10,7 @@
from .common import Extractor, Message
from ..cache import cache
-from .. import text, exception
+from .. import text
BASE_PATTERN = r"(?:https?://)?www\.pillowfort\.social"
@@ -108,7 +108,7 @@ class PillowfortExtractor(Extractor):
response = self.request(url, method="POST", headers=headers, data=data)
if not response.history:
- raise exception.AuthenticationError()
+ raise self.exc.AuthenticationError()
return {
cookie.name: cookie.value
diff --git a/gallery_dl/extractor/pinterest.py b/gallery_dl/extractor/pinterest.py
index cd3d077c..652f04cf 100644
--- a/gallery_dl/extractor/pinterest.py
+++ b/gallery_dl/extractor/pinterest.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.pinterest.com/"""
from .common import Extractor, Message
-from .. import text, util, exception
+from .. import text, util
import itertools
BASE_PATTERN = r"(?:https?://)?(?:\w+\.)?pinterest\.[\w.]+"
@@ -394,7 +394,7 @@ class PinterestPinitExtractor(PinterestExtractor):
f"/{self.groups[0]}/redirect/")
location = self.request_location(url)
if not location:
- raise exception.NotFoundError("pin")
+ raise self.exc.NotFoundError("pin")
elif PinterestPinExtractor.pattern.match(location):
yield Message.Queue, location, {
"_extractor": PinterestPinExtractor}
@@ -402,7 +402,7 @@ class PinterestPinitExtractor(PinterestExtractor):
yield Message.Queue, location, {
"_extractor": PinterestBoardExtractor}
else:
- raise exception.NotFoundError("pin")
+ raise self.exc.NotFoundError("pin")
class PinterestAPI():
@@ -545,9 +545,9 @@ class PinterestAPI():
return data
if response.status_code == 404:
resource = self.extractor.subcategory.rpartition("-")[2]
- raise exception.NotFoundError(resource)
+ raise self.exc.NotFoundError(resource)
self.extractor.log.debug("Server response: %s", response.text)
- raise exception.AbortExtraction("API request failed")
+ raise self.exc.AbortExtraction("API request failed")
def _pagination(self, resource, options):
while True:
diff --git a/gallery_dl/extractor/pixiv.py b/gallery_dl/extractor/pixiv.py
index 0a4f6b34..1deb7cc9 100644
--- a/gallery_dl/extractor/pixiv.py
+++ b/gallery_dl/extractor/pixiv.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.pixiv.net/"""
from .common import Extractor, Message, Dispatch
-from .. import text, util, dt, exception
+from .. import text, util, dt
from ..cache import cache, memcache
import itertools
import hashlib
@@ -205,7 +205,7 @@ class PixivExtractor(Extractor):
url = f"{base}0.{ext}"
self.request(url, method="HEAD")
break
- except exception.HttpError:
+ except self.exc.HttpError:
pass
else:
self.log.warning(
@@ -329,7 +329,7 @@ class PixivExtractor(Extractor):
url = f"{base}_p0.{ext}"
self.request(url, method="HEAD")
return url
- except exception.HttpError:
+ except self.exc.HttpError:
pass
def _sanitize_ajax_caption(self, caption):
@@ -721,7 +721,7 @@ class PixivRankingExtractor(PixivExtractor):
try:
self.mode = mode = mode_map[mode]
except KeyError:
- raise exception.AbortExtraction(f"Invalid mode '{mode}'")
+ raise self.exc.AbortExtraction(f"Invalid mode '{mode}'")
if date := query.get("date"):
if len(date) == 8 and date.isdecimal():
@@ -772,7 +772,7 @@ class PixivSearchExtractor(PixivExtractor):
try:
self.word = query["word"]
except KeyError:
- raise exception.AbortExtraction("Missing search term")
+ raise self.exc.AbortExtraction("Missing search term")
sort = query.get("order", "date_d")
sort_map = {
@@ -785,7 +785,7 @@ class PixivSearchExtractor(PixivExtractor):
try:
self.sort = sort = sort_map[sort]
except KeyError:
- raise exception.AbortExtraction(f"Invalid search order '{sort}'")
+ raise self.exc.AbortExtraction(f"Invalid search order '{sort}'")
target = query.get("s_mode", "s_tag_full")
target_map = {
@@ -796,7 +796,7 @@ class PixivSearchExtractor(PixivExtractor):
try:
self.target = target = target_map[target]
except KeyError:
- raise exception.AbortExtraction(f"Invalid search mode '{target}'")
+ raise self.exc.AbortExtraction(f"Invalid search mode '{target}'")
self.date_start = query.get("scd")
self.date_end = query.get("ecd")
@@ -1153,7 +1153,7 @@ class PixivAppAPI():
@cache(maxage=3600, keyarg=1)
def _login_impl(self, username):
if not self.refresh_token:
- raise exception.AuthenticationError(
+ raise self.exc.AuthenticationError(
"'refresh-token' required.\n"
"Run `gallery-dl oauth:pixiv` to get one.")
@@ -1178,7 +1178,7 @@ class PixivAppAPI():
url, method="POST", headers=headers, data=data, fatal=False)
if response.status_code >= 400:
self.log.debug(response.text)
- raise exception.AuthenticationError("Invalid refresh token")
+ raise self.exc.AuthenticationError("Invalid refresh token")
data = response.json()["response"]
return data["user"], "Bearer " + data["access_token"]
@@ -1305,7 +1305,7 @@ class PixivAppAPI():
self.log.debug(data)
if response.status_code == 404:
- raise exception.NotFoundError()
+ raise self.exc.NotFoundError()
error = data["error"]
if "rate limit" in (error.get("message") or "").lower():
@@ -1315,7 +1315,7 @@ class PixivAppAPI():
msg = (f"'{msg}'" if (msg := error.get("user_message")) else
f"'{msg}'" if (msg := error.get("message")) else
error)
- raise exception.AbortExtraction("API request failed: " + msg)
+ raise self.exc.AbortExtraction("API request failed: " + msg)
def _pagination(self, endpoint, params,
key_items="illusts", key_data=None, key_user=None):
@@ -1326,7 +1326,7 @@ class PixivAppAPI():
if key_user is not None and not data[key_user].get("id"):
user = self.user_detail(self.extractor.user_id, fatal=False)
if user.get("error"):
- raise exception.NotFoundError("user")
+ raise self.exc.NotFoundError("user")
return
while True:
diff --git a/gallery_dl/extractor/pixnet.py b/gallery_dl/extractor/pixnet.py
index 68f546ba..974c3cbf 100644
--- a/gallery_dl/extractor/pixnet.py
+++ b/gallery_dl/extractor/pixnet.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.pixnet.net/"""
from .common import Extractor, Message
-from .. import text, exception
+from .. import text
BASE_PATTERN = r"(?:https?://)?(?!www\.)([\w-]+)\.pixnet.net"
@@ -52,7 +52,7 @@ class PixnetExtractor(Extractor):
pnext = text.extr(page, 'class="nextBtn"', '>')
if pnext is None and 'name="albumpass">' in page:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"Album {self.item_id} is password-protected.")
if "href" not in pnext:
return
diff --git a/gallery_dl/extractor/plurk.py b/gallery_dl/extractor/plurk.py
index 76ca59f7..c0a04bc8 100644
--- a/gallery_dl/extractor/plurk.py
+++ b/gallery_dl/extractor/plurk.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.plurk.com/"""
from .common import Extractor, Message
-from .. import text, util, dt, exception
+from .. import text, util, dt
class PlurkExtractor(Extractor):
@@ -59,7 +59,7 @@ class PlurkExtractor(Extractor):
def _load(self, data):
if not data:
- raise exception.NotFoundError("user")
+ raise self.exc.NotFoundError("user")
return util.json_loads(
text.re(r"new Date\(([^)]+)\)").sub(r"\1", data))
diff --git a/gallery_dl/extractor/poringa.py b/gallery_dl/extractor/poringa.py
index 94a493f2..02fa093b 100644
--- a/gallery_dl/extractor/poringa.py
+++ b/gallery_dl/extractor/poringa.py
@@ -7,7 +7,7 @@
"""Extractors for http://www.poringa.net/"""
from .common import Extractor, Message
-from .. import text, exception
+from .. import text
from ..cache import cache
import itertools
@@ -32,7 +32,7 @@ class PoringaExtractor(Extractor):
try:
response = self.request(url)
- except exception.HttpError as exc:
+ except self.exc.HttpError as exc:
self.log.warning(
"Unable to fetch posts for '%s' (%s)", post_id, exc)
continue
diff --git a/gallery_dl/extractor/pornhub.py b/gallery_dl/extractor/pornhub.py
index 3dd8b2a6..c2b45498 100644
--- a/gallery_dl/extractor/pornhub.py
+++ b/gallery_dl/extractor/pornhub.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.pornhub.com/"""
from .common import Extractor, Message, Dispatch
-from .. import text, exception
+from .. import text
BASE_PATTERN = r"(?:https?://)?(?:[\w-]+\.)?pornhub\.com"
@@ -109,7 +109,7 @@ class PornhubGalleryExtractor(PornhubExtractor):
data = self.request_json(url, params=params)
if not (images := data.get("photos")):
- raise exception.AuthorizationError()
+ raise self.exc.AuthorizationError()
key = end = self._first
results = []
diff --git a/gallery_dl/extractor/postmill.py b/gallery_dl/extractor/postmill.py
index 22d2bded..99f44317 100644
--- a/gallery_dl/extractor/postmill.py
+++ b/gallery_dl/extractor/postmill.py
@@ -7,7 +7,7 @@
"""Extractors for Postmill instances"""
from .common import BaseExtractor, Message
-from .. import text, exception
+from .. import text
class PostmillExtractor(BaseExtractor):
@@ -102,7 +102,7 @@ class PostmillSubmissionsExtractor(PostmillExtractor):
if response.history:
redirect_url = response.url
if redirect_url == self.root + "/login":
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"HTTP redirect to login page ({redirect_url})")
page = response.text
diff --git a/gallery_dl/extractor/readcomiconline.py b/gallery_dl/extractor/readcomiconline.py
index 24a01712..6add5724 100644
--- a/gallery_dl/extractor/readcomiconline.py
+++ b/gallery_dl/extractor/readcomiconline.py
@@ -9,7 +9,7 @@
"""Extractors for https://readcomiconline.li/"""
from .common import Extractor, ChapterExtractor, MangaExtractor
-from .. import text, exception
+from .. import text
import binascii
BASE_PATTERN = r"(?i)(?:https?://)?(?:www\.)?readcomiconline\.(?:li|to)"
@@ -36,7 +36,7 @@ class ReadcomiconlineBase():
"the CAPTCHA, and press ENTER to continue", response.url)
self.input()
else:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"Redirect to \n{response.url}\nVisit this URL in your "
f"browser and solve the CAPTCHA to continue")
diff --git a/gallery_dl/extractor/reddit.py b/gallery_dl/extractor/reddit.py
index e731c841..0cd7ef71 100644
--- a/gallery_dl/extractor/reddit.py
+++ b/gallery_dl/extractor/reddit.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.reddit.com/"""
from .common import Extractor, Message
-from .. import text, util, exception
+from .. import text, util
from ..cache import cache
@@ -525,7 +525,7 @@ class RedditAPI():
if response.status_code != 200:
self.log.debug("Server response: %s", data)
- raise exception.AuthenticationError(
+ raise self.exc.AuthenticationError(
f"\"{data.get('error')}: {data.get('message')}\"")
return "Bearer " + data["access_token"]
@@ -555,16 +555,16 @@ class RedditAPI():
try:
data = response.json()
except ValueError:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
text.remove_html(response.text))
if "error" in data:
if data["error"] == 403:
- raise exception.AuthorizationError()
+ raise self.exc.AuthorizationError()
if data["error"] == 404:
- raise exception.NotFoundError()
+ raise self.exc.NotFoundError()
self.log.debug(data)
- raise exception.AbortExtraction(data.get("message"))
+ raise self.exc.AbortExtraction(data.get("message"))
return data
def _pagination(self, endpoint, params):
@@ -592,7 +592,7 @@ class RedditAPI():
if post["num_comments"] and self.comments:
try:
yield self.submission(post["id"])
- except exception.AuthorizationError:
+ except self.exc.AuthorizationError:
pass
else:
yield post, ()
diff --git a/gallery_dl/extractor/rule34xyz.py b/gallery_dl/extractor/rule34xyz.py
index 6b313d8a..8c5f3e4d 100644
--- a/gallery_dl/extractor/rule34xyz.py
+++ b/gallery_dl/extractor/rule34xyz.py
@@ -9,7 +9,7 @@
"""Extractors for https://rule34.xyz/"""
from .booru import BooruExtractor
-from .. import text, exception
+from .. import text
from ..cache import cache
import collections
@@ -129,7 +129,7 @@ class Rule34xyzExtractor(BooruExtractor):
if jwt := response.get("jwt"):
return "Bearer " + jwt
- raise exception.AuthenticationError(
+ raise self.exc.AuthenticationError(
(msg := response.get("message")) and f'"{msg}"')
diff --git a/gallery_dl/extractor/sankaku.py b/gallery_dl/extractor/sankaku.py
index 6cb9f79d..b8595540 100644
--- a/gallery_dl/extractor/sankaku.py
+++ b/gallery_dl/extractor/sankaku.py
@@ -10,7 +10,7 @@
from .booru import BooruExtractor
from .common import Message
-from .. import text, util, exception
+from .. import text, util
from ..cache import cache
import collections
@@ -284,7 +284,7 @@ class SankakuAPI():
if response.status_code == 429:
until = response.headers.get("X-RateLimit-Reset")
if not until and b"_tags-explicit-limit" in response.content:
- raise exception.AuthorizationError(
+ raise self.exc.AuthorizationError(
"Search tag limit exceeded")
seconds = None if until else 600
self.extractor.wait(until=until, seconds=seconds)
@@ -305,7 +305,7 @@ class SankakuAPI():
code = f"'{code.rpartition('__')[2].replace('-', ' ')}'"
except Exception:
pass
- raise exception.AbortExtraction(code)
+ raise self.exc.AbortExtraction(code)
return data
def _pagination(self, endpoint, params):
@@ -365,5 +365,5 @@ def _authenticate_impl(extr, username, password):
data = response.json()
if response.status_code >= 400 or not data.get("success"):
- raise exception.AuthenticationError(data.get("error"))
+ raise extr.exc.AuthenticationError(data.get("error"))
return "Bearer " + data["access_token"]
diff --git a/gallery_dl/extractor/schalenetwork.py b/gallery_dl/extractor/schalenetwork.py
index 4a927b51..bb26d9fc 100644
--- a/gallery_dl/extractor/schalenetwork.py
+++ b/gallery_dl/extractor/schalenetwork.py
@@ -9,7 +9,7 @@
"""Extractors for https://niyaniya.moe/"""
from .common import GalleryExtractor, Extractor, Message
-from .. import text, exception
+from .. import text
import collections
BASE_PATTERN = (
@@ -66,7 +66,7 @@ class SchalenetworkExtractor(Extractor):
if token := self.config("token"):
return "Bearer " + token.rpartition(' ')[2]
if required:
- raise exception.AuthRequired("'token'", "your favorites")
+ raise self.exc.AuthRequired("'token'", "your favorites")
def _crt(self):
crt = self.config("crt")
@@ -88,7 +88,7 @@ class SchalenetworkExtractor(Extractor):
msg = None
else:
msg = f"{exc.status} {exc.response.reason}"
- raise exception.AuthRequired(
+ raise self.exc.AuthRequired(
"'crt' query parameter & matching 'user-agent'", None, msg)
@@ -153,7 +153,7 @@ class SchalenetworkGalleryExtractor(SchalenetworkExtractor, GalleryExtractor):
try:
data_fmt = self.request_json(
url, method="POST", headers=headers)
- except exception.HttpError as exc:
+ except self.exc.HttpError as exc:
self._require_auth(exc)
self.fmt = self._select_format(data_fmt["data"])
@@ -217,7 +217,7 @@ class SchalenetworkGalleryExtractor(SchalenetworkExtractor, GalleryExtractor):
self.log.debug("%s: Format %s is not available",
self.groups[1], fmtid)
else:
- raise exception.NotFoundError("format")
+ raise self.exc.NotFoundError("format")
self.log.debug("%s: Selected format %s", self.groups[1], fmtid)
fmt["w"] = fmtid
diff --git a/gallery_dl/extractor/scrolller.py b/gallery_dl/extractor/scrolller.py
index b4693ae8..e9554756 100644
--- a/gallery_dl/extractor/scrolller.py
+++ b/gallery_dl/extractor/scrolller.py
@@ -9,7 +9,7 @@
"""Extractors for https://scrolller.com/"""
from .common import Extractor, Message
-from .. import text, util, exception
+from .. import text, util
from ..cache import cache
BASE_PATTERN = r"(?:https?://)?(?:www\.)?scrolller\.com"
@@ -82,9 +82,9 @@ class ScrolllerExtractor(Extractor):
try:
data = self._request_graphql("LoginQuery", variables, False)
- except exception.HttpError as exc:
+ except self.exc.HttpError as exc:
if exc.status == 403:
- raise exception.AuthenticationError()
+ raise self.exc.AuthenticationError()
raise
return data["login"]["token"]
@@ -206,7 +206,7 @@ class ScrolllerFollowingExtractor(ScrolllerExtractor):
self.login()
if not self.auth_token:
- raise exception.AuthorizationError("Login required")
+ raise self.exc.AuthorizationError("Login required")
variables = {
"iterator": None,
diff --git a/gallery_dl/extractor/seiga.py b/gallery_dl/extractor/seiga.py
index 8ea4f24a..f221ed89 100644
--- a/gallery_dl/extractor/seiga.py
+++ b/gallery_dl/extractor/seiga.py
@@ -9,7 +9,7 @@
"""Extractors for https://seiga.nicovideo.jp/"""
from .common import Extractor, Message
-from .. import text, util, exception
+from .. import text, util
from ..cache import cache
@@ -45,7 +45,7 @@ class SeigaExtractor(Extractor):
url = f"{self.root}/image/source/{image_id}"
location = self.request_location(url, notfound="image")
if "nicovideo.jp/login" in location:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"HTTP redirect to login page ({location.partition('?')[0]})")
return location.replace("/o/", "/priv/", 1)
@@ -57,7 +57,7 @@ class SeigaExtractor(Extractor):
if username:
return self.cookies_update(self._login_impl(username, password))
- raise exception.AuthorizationError(
+ raise self.exc.AuthorizationError(
"username & password or 'user_session' cookie required")
@cache(maxage=365*86400, keyarg=1)
@@ -76,7 +76,7 @@ class SeigaExtractor(Extractor):
response = self.request(url, method="POST", data=data)
if "message=cant_login" in response.url:
- raise exception.AuthenticationError()
+ raise self.exc.AuthenticationError()
if "/mfa" in response.url:
page = response.text
@@ -93,7 +93,7 @@ class SeigaExtractor(Extractor):
if not response.history and \
b"Confirmation code is incorrect" in response.content:
- raise exception.AuthenticationError(
+ raise self.exc.AuthenticationError(
"Incorrect Confirmation Code")
return {
@@ -133,7 +133,7 @@ class SeigaUserExtractor(SeigaExtractor):
))[0]
if not data["name"] and "ユーザー情報が取得出来ませんでした" in page:
- raise exception.NotFoundError("user")
+ raise self.exc.NotFoundError("user")
return {
"user": {
diff --git a/gallery_dl/extractor/simplyhentai.py b/gallery_dl/extractor/simplyhentai.py
index 78d3daf9..01e45aa6 100644
--- a/gallery_dl/extractor/simplyhentai.py
+++ b/gallery_dl/extractor/simplyhentai.py
@@ -9,7 +9,7 @@
"""Extract hentai-manga from https://www.simply-hentai.com/"""
from .common import GalleryExtractor, Extractor, Message
-from .. import text, util, exception
+from .. import text, util
class SimplyhentaiGalleryExtractor(GalleryExtractor):
@@ -38,7 +38,7 @@ class SimplyhentaiGalleryExtractor(GalleryExtractor):
title = extr('
", " / "):
model_name = text.unescape(model_name)
@@ -86,7 +86,7 @@ class ThefapModelExtractor(ThefapExtractor):
page = self.request(url).text
if 'id="content"' not in page:
- raise exception.NotFoundError("model")
+ raise self.exc.NotFoundError("model")
if model_name := text.extr(page, "
"):
model_name = text.unescape(model_name[model_name.find(">")+1:])
diff --git a/gallery_dl/extractor/tiktok.py b/gallery_dl/extractor/tiktok.py
index 3b4c19af..3a272a9e 100644
--- a/gallery_dl/extractor/tiktok.py
+++ b/gallery_dl/extractor/tiktok.py
@@ -7,7 +7,7 @@
"""Extractors for https://www.tiktok.com/"""
from .common import Extractor, Message, Dispatch
-from .. import text, util, ytdl, exception
+from .. import text, util, ytdl
import functools
import itertools
import binascii
@@ -166,7 +166,7 @@ class TiktokExtractor(Extractor):
try:
response = self.request(url)
if response.history and "/login" in response.url:
- raise exception.AuthorizationError(
+ raise self.exc.AuthorizationError(
"HTTP redirect to login page "
f"('{response.url.partition('?')[0]}')")
html = response.text
@@ -227,14 +227,14 @@ class TiktokExtractor(Extractor):
data["webapp.app-context"]
data = data["webapp.user-detail"]
if not self._check_status_code(data, profile_url, "profile"):
- raise exception.ExtractionError(
+ raise self.exc.ExtractionError(
f"{profile_url}: could not extract rehydration data")
try:
for key in additional_keys:
data = data[key]
except KeyError as exc:
self.log.traceback(exc)
- raise exception.ExtractionError(
+ raise self.exc.ExtractionError(
"%s: could not extract rehydration data (%s)",
profile_url, ", ".join(additional_keys))
return data
@@ -258,7 +258,7 @@ class TiktokExtractor(Extractor):
if test.digest() == expected:
break
else:
- raise exception.ExtractionError("failed to find matching digest")
+ raise self.exc.ExtractionError("failed to find matching digest")
# extract cookie names
wci = text.extr(text.extr(html, 'id="wci"', '>'), 'class="', '"')
@@ -278,7 +278,7 @@ class TiktokExtractor(Extractor):
sec_uid = self._extract_id(
profile_url, user_name, r"MS4wLjABAAAA[\w-]{64}", "secUid")
if sec_uid is None:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"{user_name}: unable to extract secondary user ID")
return sec_uid
@@ -286,7 +286,7 @@ class TiktokExtractor(Extractor):
author_id = self._extract_id(
profile_url, user_name, r"[0-9]+", "id")
if author_id is None:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"{user_name}: unable to extract user ID")
return author_id
@@ -306,7 +306,7 @@ class TiktokExtractor(Extractor):
video = post["video"]
urls = self._extract_video_urls(video)
if not urls:
- raise exception.ExtractionError(
+ raise self.exc.ExtractionError(
f"{post['id']}: Failed to extract video URLs. "
f"You may need cookies to continue.")
@@ -533,7 +533,7 @@ class TiktokVmpostExtractor(TiktokExtractor):
url = self.request_location(url, headers=headers, notfound="post")
if not url or len(url) <= 28:
# https://www.tiktok.com/?_r=1
- raise exception.NotFoundError("post")
+ raise self.exc.NotFoundError("post")
data = {"_extractor": TiktokPostExtractor}
yield Message.Queue, url.partition("?")[0], data
@@ -944,7 +944,7 @@ class TiktokTimeCursor(TiktokPaginationCursor):
elif not self.reverse and (new_cursor < self.cursor or no_cursor):
new_cursor = self.fallback_cursor(data)
elif no_cursor:
- raise exception.ExtractionError("Could not extract next cursor")
+ raise self.exc.ExtractionError("Could not extract next cursor")
self.cursor = new_cursor
return not data.get(self.has_more_key, False)
@@ -1273,7 +1273,7 @@ class TiktokPaginationRequest:
extractor.log.warning("%s: TikTok API keeps sending the same "
"page. Taking measures to avoid an infinite "
"loop", url)
- raise exception.ExtractionError(
+ raise extractor.exc.ExtractionError(
"TikTok API keeps sending the same page")
diff --git a/gallery_dl/extractor/tumblr.py b/gallery_dl/extractor/tumblr.py
index bd597807..3cae41aa 100644
--- a/gallery_dl/extractor/tumblr.py
+++ b/gallery_dl/extractor/tumblr.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.tumblr.com/"""
from .common import Extractor, Message
-from .. import text, util, dt, oauth, exception
+from .. import text, util, dt, oauth
BASE_PATTERN = (
@@ -473,7 +473,7 @@ class TumblrAPI(oauth.OAuth1API):
self.log.debug(data)
if status == 403:
- raise exception.AuthorizationError()
+ raise self.exc.AuthorizationError()
elif status == 404:
try:
@@ -492,8 +492,8 @@ class TumblrAPI(oauth.OAuth1API):
else:
self.log.info("Run 'gallery-dl oauth:tumblr' "
"to access dashboard-only blogs")
- raise exception.AuthorizationError(error)
- raise exception.NotFoundError("user or post")
+ raise self.exc.AuthorizationError(error)
+ raise self.exc.NotFoundError("user or post")
elif status == 429:
# daily rate limit
@@ -514,7 +514,7 @@ class TumblrAPI(oauth.OAuth1API):
continue
t = (dt.now() + dt.timedelta(0, float(reset))).time()
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"Aborting - Rate limit will reset at "
f"{t.hour:02}:{t.minute:02}:{t.second:02}")
@@ -524,7 +524,7 @@ class TumblrAPI(oauth.OAuth1API):
self.extractor.wait(seconds=reset)
continue
- raise exception.AbortExtraction(data)
+ raise self.exc.AbortExtraction(data)
def _pagination(self, endpoint, params,
blog=None, key="posts", cache=False):
diff --git a/gallery_dl/extractor/twibooru.py b/gallery_dl/extractor/twibooru.py
index 4558e212..844ed302 100644
--- a/gallery_dl/extractor/twibooru.py
+++ b/gallery_dl/extractor/twibooru.py
@@ -9,7 +9,7 @@
"""Extractors for https://twibooru.org/"""
from .booru import BooruExtractor
-from .. import text, exception
+from .. import text
import operator
BASE_PATTERN = r"(?:https?://)?(?:www\.)?twibooru\.org"
@@ -153,7 +153,7 @@ class TwibooruAPI():
# error
self.extractor.log.debug(response.content)
- raise exception.HttpError("", response)
+ raise self.exc.HttpError("", response)
def _pagination(self, endpoint, params):
extr = self.extractor
diff --git a/gallery_dl/extractor/twitter.py b/gallery_dl/extractor/twitter.py
index dc5cf11b..d89a7b68 100644
--- a/gallery_dl/extractor/twitter.py
+++ b/gallery_dl/extractor/twitter.py
@@ -9,7 +9,7 @@
"""Extractors for https://x.com/"""
from .common import Extractor, Message, Dispatch
-from .. import text, util, dt, exception
+from .. import text, util, dt
from ..cache import cache, memcache
import itertools
import random
@@ -906,7 +906,7 @@ class TwitterTimelineExtractor(TwitterExtractor):
return self.api.user_media
if strategy == "with_replies":
return self.api.user_tweets_and_replies
- raise exception.AbortExtraction(f"Invalid strategy '{strategy}'")
+ raise self.exc.AbortExtraction(f"Invalid strategy '{strategy}'")
class TwitterTweetsExtractor(TwitterExtractor):
@@ -1092,7 +1092,7 @@ class TwitterTweetExtractor(TwitterExtractor):
try:
self._assign_user(tweet["core"]["user_results"]["result"])
except KeyError:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"'{tweet.get('reason') or 'Unavailable'}'")
yield tweet
@@ -1403,10 +1403,10 @@ class TwitterAPI():
if tweet.get("__typename") == "TweetUnavailable":
reason = tweet.get("reason")
if reason in {"NsfwViewerHasNoStatedAge", "NsfwLoggedOut"}:
- raise exception.AuthRequired(message="NSFW Tweet")
+ raise self.exc.AuthRequired(message="NSFW Tweet")
if reason == "Protected":
- raise exception.AuthRequired(message="Protected Tweet")
- raise exception.AbortExtraction(f"Tweet unavailable ('{reason}')")
+ raise self.exc.AuthRequired(message="Protected Tweet")
+ raise self.exc.AbortExtraction(f"Tweet unavailable ('{reason}')")
return tweet
@@ -1754,9 +1754,9 @@ class TwitterAPI():
return user["rest_id"]
except KeyError:
if user and user.get("__typename") == "UserUnavailable":
- raise exception.NotFoundError(user["message"], False)
+ raise self.exc.NotFoundError(user["message"], False)
else:
- raise exception.NotFoundError("user")
+ raise self.exc.NotFoundError("user")
@cache(maxage=3600)
def _guest_token(self):
@@ -1835,13 +1835,13 @@ class TwitterAPI():
if "this account is temporarily locked" in msg:
msg = "Account temporarily locked"
if self.extractor.config("locked") != "wait":
- raise exception.AuthorizationError(msg)
+ raise self.exc.AuthorizationError(msg)
self.log.warning(msg)
self.extractor.input("Press ENTER to retry.")
retry = True
elif "Could not authenticate you" in msg:
- raise exception.AbortExtraction(f"'{msg}'")
+ raise self.exc.AbortExtraction(f"'{msg}'")
elif msg.lower().startswith("timeout"):
retry = True
@@ -1858,7 +1858,7 @@ class TwitterAPI():
return data
elif response.status_code in {403, 404} and \
not self.headers["x-twitter-auth-type"]:
- raise exception.AuthRequired(
+ raise self.exc.AuthRequired(
"authenticated cookies", "timeline")
elif response.status_code == 429:
self._handle_ratelimit(response)
@@ -1870,7 +1870,7 @@ class TwitterAPI():
except Exception:
pass
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"{response.status_code} {response.reason} ({errors})")
def _pagination_rest(self, endpoint, params):
@@ -2065,13 +2065,13 @@ class TwitterAPI():
self.headers["x-twitter-auth-type"] = None
extr.log.info("Retrying API request as guest")
continue
- raise exception.AuthorizationError(
+ raise self.exc.AuthorizationError(
user["screen_name"] + " blocked your account")
elif user.get("protected"):
- raise exception.AuthorizationError(
+ raise self.exc.AuthorizationError(
user["screen_name"] + "'s Tweets are protected")
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
"Unable to retrieve Tweets from this timeline")
tweets = []
@@ -2301,7 +2301,7 @@ class TwitterAPI():
def _handle_ratelimit(self, response):
rl = self.extractor.config("ratelimit")
if rl == "abort":
- raise exception.AbortExtraction("Rate limit exceeded")
+ raise self.exc.AbortExtraction("Rate limit exceeded")
until = response.headers.get("x-rate-limit-reset")
seconds = None if until else 60.0
@@ -2313,7 +2313,7 @@ class TwitterAPI():
num = text.parse_int(num)
msg = f"Rate limit exceeded ({amt}/{num})"
if amt >= num:
- raise exception.AbortExtraction(msg)
+ raise self.exc.AbortExtraction(msg)
self.log.warning(msg)
self._ratelimit_amt = amt + 1
elif rl == "wait":
diff --git a/gallery_dl/extractor/urlgalleries.py b/gallery_dl/extractor/urlgalleries.py
index 0d8b3d38..4e3660de 100644
--- a/gallery_dl/extractor/urlgalleries.py
+++ b/gallery_dl/extractor/urlgalleries.py
@@ -7,7 +7,7 @@
"""Extractors for https://urlgalleries.net/"""
from .common import GalleryExtractor, Message
-from .. import text, exception
+from .. import text
class UrlgalleriesGalleryExtractor(GalleryExtractor):
@@ -29,8 +29,8 @@ class UrlgalleriesGalleryExtractor(GalleryExtractor):
if 300 <= response.status_code < 500:
if response.headers.get("location", "").endswith(
"/not_found_adult.php"):
- raise exception.NotFoundError("gallery")
- raise exception.HttpError(None, response)
+ raise self.exc.NotFoundError("gallery")
+ raise self.exc.HttpError(None, response)
page = response.text
imgs = self.images(page)
diff --git a/gallery_dl/extractor/urlshortener.py b/gallery_dl/extractor/urlshortener.py
index 7a9269d0..b8c9521e 100644
--- a/gallery_dl/extractor/urlshortener.py
+++ b/gallery_dl/extractor/urlshortener.py
@@ -7,7 +7,6 @@
"""Extractors for general-purpose URL shorteners"""
from .common import BaseExtractor, Message
-from .. import exception
class UrlshortenerExtractor(BaseExtractor):
@@ -40,5 +39,5 @@ class UrlshortenerLinkExtractor(UrlshortenerExtractor):
location = self.request_location(
url, headers=self.config_instance("headers"), notfound="URL")
if not location:
- raise exception.AbortExtraction("Unable to resolve short URL")
+ raise self.exc.AbortExtraction("Unable to resolve short URL")
yield Message.Queue, location, {}
diff --git a/gallery_dl/extractor/vipergirls.py b/gallery_dl/extractor/vipergirls.py
index 39817632..07f807dd 100644
--- a/gallery_dl/extractor/vipergirls.py
+++ b/gallery_dl/extractor/vipergirls.py
@@ -9,7 +9,7 @@
"""Extractors for https://vipergirls.to/"""
from .common import Extractor, Message
-from .. import text, util, exception
+from .. import text, util
from ..cache import cache
BASE_PATTERN = r"(?:https?://)?(?:www\.)?vipergirls\.to"
@@ -104,7 +104,7 @@ class VipergirlsExtractor(Extractor):
response = self.request(url, method="POST", data=data)
if not response.cookies.get("vg_password"):
- raise exception.AuthenticationError()
+ raise self.exc.AuthenticationError()
return {cookie.name: cookie.value
for cookie in response.cookies}
diff --git a/gallery_dl/extractor/vk.py b/gallery_dl/extractor/vk.py
index a9774e1c..95894e7a 100644
--- a/gallery_dl/extractor/vk.py
+++ b/gallery_dl/extractor/vk.py
@@ -9,7 +9,7 @@
"""Extractors for https://vk.com/"""
from .common import Extractor, Message
-from .. import text, exception
+from .. import text
BASE_PATTERN = r"(?:https://)?(?:www\.|m\.)?vk\.com"
@@ -100,13 +100,13 @@ class VkExtractor(Extractor):
response = self.request(
url, method="POST", headers=headers, data=data)
if response.history and "/challenge.html" in response.url:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
"HTTP redirect to 'challenge' page:\n" + response.url)
payload = response.json()["payload"][1]
if len(payload) < 4:
self.log.debug(payload)
- raise exception.AuthorizationError(
+ raise self.exc.AuthorizationError(
text.unescape(payload[0]) if payload[0] else None)
total = payload[1]
diff --git a/gallery_dl/extractor/wallhaven.py b/gallery_dl/extractor/wallhaven.py
index 3025c566..041d4d2f 100644
--- a/gallery_dl/extractor/wallhaven.py
+++ b/gallery_dl/extractor/wallhaven.py
@@ -9,7 +9,7 @@
"""Extractors for https://wallhaven.cc/"""
from .common import Extractor, Message, Dispatch
-from .. import text, exception
+from .. import text
class WallhavenExtractor(Extractor):
@@ -199,7 +199,7 @@ class WallhavenAPI():
continue
self.extractor.log.debug("Server response: %s", response.text)
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"API request failed "
f"({response.status_code} {response.reason})")
diff --git a/gallery_dl/extractor/webtoons.py b/gallery_dl/extractor/webtoons.py
index b8fff9b3..d74fa0c9 100644
--- a/gallery_dl/extractor/webtoons.py
+++ b/gallery_dl/extractor/webtoons.py
@@ -10,7 +10,7 @@
"""Extractors for https://www.webtoons.com/"""
from .common import GalleryExtractor, Extractor, Message
-from .. import exception, text, util
+from .. import text, util
BASE_PATTERN = r"(?:https?://)?(?:www\.)?webtoons\.com"
LANG_PATTERN = BASE_PATTERN + r"/(([^/?#]+)"
@@ -40,7 +40,7 @@ class WebtoonsBase():
def request(self, url, **kwargs):
response = Extractor.request(self, url, **kwargs)
if response.history and "/ageGate" in response.url:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"HTTP redirect to age gate check ('{response.url}')")
return response
diff --git a/gallery_dl/extractor/weibo.py b/gallery_dl/extractor/weibo.py
index 040d483f..076a6517 100644
--- a/gallery_dl/extractor/weibo.py
+++ b/gallery_dl/extractor/weibo.py
@@ -9,7 +9,7 @@
"""Extractors for https://www.weibo.com/"""
from .common import Extractor, Message, Dispatch
-from .. import text, util, exception
+from .. import text, util
from ..cache import cache
import random
@@ -65,7 +65,7 @@ class WeiboExtractor(Extractor):
if response.history:
if "login.sina.com" in response.url:
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f"HTTP redirect to login page "
f"({response.url.partition('?')[0]})")
if "passport.weibo.com" in response.url:
@@ -189,7 +189,7 @@ class WeiboExtractor(Extractor):
not text.ext_from_url(video["url"]):
try:
video["url"] = self.request_location(video["url"])
- except exception.HttpError as exc:
+ except self.exc.HttpError as exc:
self.log.warning("%s: %s", exc.__class__.__name__, exc)
video["url"] = ""
@@ -230,7 +230,7 @@ class WeiboExtractor(Extractor):
if not data.get("ok"):
self.log.debug(response.content)
if "since_id" not in params: # first iteration
- raise exception.AbortExtraction(
+ raise self.exc.AbortExtraction(
f'"{data.get("msg") or "unknown error"}"')
try:
@@ -479,14 +479,14 @@ class WeiboAlbumExtractor(WeiboExtractor):
try:
sub = subalbums[int(subalbum)-1]
except Exception:
- raise exception.NotFoundError("subalbum")
+ raise self.exc.NotFoundError("subalbum")
else:
subalbum = text.unquote(subalbum)
for sub in subalbums:
if sub["pic_title"] == subalbum:
break
else:
- raise exception.NotFoundError("subalbum")
+ raise self.exc.NotFoundError("subalbum")
return ((sub, self._pagination_subalbum(uid, sub)),)
def _pagination_subalbum(self, uid, sub):
@@ -504,7 +504,7 @@ class WeiboStatusExtractor(WeiboExtractor):
status = self._status_by_id(self.user)
if status.get("ok") != 1:
self.log.debug(status)
- raise exception.NotFoundError("status")
+ raise self.exc.NotFoundError("status")
return (status,)
diff --git a/gallery_dl/extractor/wikimedia.py b/gallery_dl/extractor/wikimedia.py
index 1be211c3..528ae95d 100644
--- a/gallery_dl/extractor/wikimedia.py
+++ b/gallery_dl/extractor/wikimedia.py
@@ -10,7 +10,7 @@
"""Extractors for Wikimedia sites"""
from .common import BaseExtractor, Message
-from .. import text, util, exception
+from .. import text, util
from ..cache import cache
@@ -71,7 +71,7 @@ class WikimediaExtractor(BaseExtractor):
response = self.request(url, method="HEAD", fatal=None)
if response.status_code < 400:
return url
- raise exception.AbortExtraction("Unable to find API endpoint")
+ raise self.exc.AbortExtraction("Unable to find API endpoint")
def prepare_info(self, info):
"""Adjust the content of an image info object"""
diff --git a/gallery_dl/extractor/xenforo.py b/gallery_dl/extractor/xenforo.py
index a199ff82..bafee2d4 100644
--- a/gallery_dl/extractor/xenforo.py
+++ b/gallery_dl/extractor/xenforo.py
@@ -9,7 +9,7 @@
"""Extractors for XenForo forums"""
from .common import BaseExtractor, Message
-from .. import text, util, exception
+from .. import text, util
from ..cache import cache
import binascii
@@ -165,7 +165,7 @@ class XenforoExtractor(BaseExtractor):
def request_page(self, url):
try:
return self.request(url)
- except exception.HttpError as exc:
+ except self.exc.HttpError as exc:
if exc.status == 403 and b">Log in<" in exc.response.content:
self._require_auth(exc.response)
raise
@@ -197,7 +197,7 @@ class XenforoExtractor(BaseExtractor):
if not response.history:
err = self._extract_error(response.text)
err = f'"{err}"' if err else None
- raise exception.AuthenticationError(err)
+ raise self.exc.AuthenticationError(err)
return {
cookie.name: cookie.value
@@ -420,7 +420,7 @@ class XenforoExtractor(BaseExtractor):
return main["contentUrl"], media
def _require_auth(self, response=None):
- raise exception.AuthRequired(
+ raise self.exc.AuthRequired(
("username & password", "authenticated cookies"), None,
None if response is None else self._extract_error(response.text))
@@ -473,7 +473,7 @@ class XenforoPostExtractor(XenforoExtractor):
pos = page.find(f'data-content="post-{post_id}"')
if pos < 0:
- raise exception.NotFoundError("post")
+ raise self.exc.NotFoundError("post")
html = text.extract(page, "", "<"):
self.log.warning(f"'{msg}'")