Mirror of https://github.com/Benexl/FastAnime.git
chore: make some packages optional and cleanup deprecated
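The thread running through the diff below: direct uses of requests are swapped for httpx, and heavyweight imports (thefuzz, lxml, libtorrent, pypresence, mpv, plyer) move behind optional extras, with a new pure Python fallback for fuzzy matching in fastanime/core/utils/fuzzy.py.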
@@ -1,5 +1,5 @@
 from rich.progress import Progress
-from thefuzz import fuzz
+from .....core.utils.fuzzy import fuzz

 from .....libs.provider.anime.params import SearchParams
 from .....libs.provider.anime.types import SearchResult
@@ -34,7 +34,7 @@ def get_anime_titles(query: str, variables: dict = {}):
     Returns:
         a boolean indicating success and none or an anilist object depending on success
     """
-    from requests import post
+    from httpx import post

     try:
         response = post(
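The swapped call keeps the same shape as before; a standalone sketch of an httpx POST against AniList's public GraphQL endpoint (the query string is a made-up placeholder, not the one the function sends):

from httpx import post

response = post(
    "https://graphql.anilist.co",
    json={"query": "query { Media(id: 1) { title { romaji } } }"},
    timeout=10,
)
print(response.json())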
@@ -6,7 +6,7 @@ import shutil
 import subprocess
 import sys

-import requests
+from httpx import get
 from rich import print

 from ...core.constants import AUTHOR, GIT_REPO, PROJECT_NAME_LOWER, __version__
@@ -17,7 +17,7 @@ API_URL = f"https://api.{GIT_REPO}/repos/{AUTHOR}/{PROJECT_NAME_LOWER}/releases/
 def check_for_updates():
     USER_AGENT = f"{PROJECT_NAME_LOWER} user"
     try:
-        request = requests.get(
+        response = get(
             API_URL,
             headers={
                 "User-Agent": USER_AGENT,
@@ -29,8 +29,8 @@ def check_for_updates():
         print("You are not connected to the internet")
         return True, {}

-    if request.status_code == 200:
-        release_json = request.json()
+    if response.status_code == 200:
+        release_json = response.json()
         remote_tag = list(
             map(int, release_json["tag_name"].replace("v", "").split("."))
         )
@@ -51,7 +51,7 @@ def check_for_updates():
         return (is_latest, release_json)
     else:
         print("Failed to check for updates")
-        print(request.text)
+        print(response.text)
         return (True, {})
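The tag handling above relies on Python comparing lists element-wise; a minimal illustration with made-up version strings:

remote_tag = list(map(int, "v2.10.0".replace("v", "").split(".")))  # [2, 10, 0]
local_tag = list(map(int, "2.9.1".split(".")))                      # [2, 9, 1]
is_latest = local_tag >= remote_tag  # False: 9 < 10, compared as integers, not text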
fastanime/core/utils/fuzzy.py (new file, 480 lines)
@@ -0,0 +1,480 @@
"""
Fuzzy string matching utilities with fallback implementation.

This module provides a fuzzy matching class that uses thefuzz if available,
otherwise falls back to a pure Python implementation with the same API.

Usage:
    Basic usage with the convenience functions:

    >>> from fastanime.core.utils.fuzzy import fuzz
    >>> fuzz.ratio("hello world", "hello")
    62
    >>> fuzz.partial_ratio("hello world", "hello")
    100

    Using the FuzzyMatcher class directly:

    >>> from fastanime.core.utils.fuzzy import FuzzyMatcher
    >>> matcher = FuzzyMatcher()
    >>> matcher.backend
    'thefuzz'  # or 'pure_python' if thefuzz is not available
    >>> matcher.token_sort_ratio("fuzzy wuzzy", "wuzzy fuzzy")
    100

    For drop-in replacement of thefuzz.fuzz:

    >>> from fastanime.core.utils.fuzzy import ratio, partial_ratio
    >>> ratio("test", "best")
    75
"""

import logging
from typing import Any, Optional, Union

logger = logging.getLogger(__name__)

# Try to import thefuzz, fall back to pure Python implementation
try:
    from thefuzz import fuzz as _fuzz_impl
    THEFUZZ_AVAILABLE = True
    logger.debug("Using thefuzz for fuzzy matching")
except ImportError:
    _fuzz_impl = None
    THEFUZZ_AVAILABLE = False
    logger.debug("thefuzz not available, using fallback implementation")

class _PurePythonFuzz:
    """
    Pure Python implementation of fuzzy string matching algorithms.

    This provides the same API as thefuzz.fuzz but with pure Python implementations
    of the core algorithms.
    """

    @staticmethod
    def _levenshtein_distance(s1: str, s2: str) -> int:
        """
        Calculate the Levenshtein distance between two strings.

        Args:
            s1: First string
            s2: Second string

        Returns:
            The Levenshtein distance as an integer
        """
        if len(s1) < len(s2):
            return _PurePythonFuzz._levenshtein_distance(s2, s1)

        if len(s2) == 0:
            return len(s1)

        previous_row = list(range(len(s2) + 1))
        for i, c1 in enumerate(s1):
            current_row = [i + 1]
            for j, c2 in enumerate(s2):
                # Cost of insertions, deletions and substitutions
                insertions = previous_row[j + 1] + 1
                deletions = current_row[j] + 1
                substitutions = previous_row[j] + (c1 != c2)
                current_row.append(min(insertions, deletions, substitutions))
            previous_row = current_row

        return previous_row[-1]

    @staticmethod
    def _longest_common_subsequence(s1: str, s2: str) -> int:
        """
        Calculate the length of the longest common subsequence.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Length of the longest common subsequence
        """
        m, n = len(s1), len(s2)
        dp = [[0] * (n + 1) for _ in range(m + 1)]

        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if s1[i - 1] == s2[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1] + 1
                else:
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])

        return dp[m][n]

    @staticmethod
    def _normalize_string(s: str) -> str:
        """
        Normalize a string for comparison by converting to lowercase and stripping whitespace.

        Args:
            s: String to normalize

        Returns:
            Normalized string
        """
        return s.lower().strip()
    @staticmethod
    def ratio(s1: str, s2: str) -> int:
        """
        Calculate the similarity ratio between two strings using Levenshtein distance.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Similarity ratio as an integer from 0 to 100
        """
        if not s1 and not s2:
            return 100
        if not s1 or not s2:
            return 0

        distance = _PurePythonFuzz._levenshtein_distance(s1, s2)
        max_len = max(len(s1), len(s2))

        if max_len == 0:
            return 100

        similarity = (max_len - distance) / max_len
        return int(similarity * 100)

    @staticmethod
    def partial_ratio(s1: str, s2: str) -> int:
        """
        Calculate the partial similarity ratio between two strings.

        This finds the best matching substring and calculates the ratio for that.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Partial similarity ratio as an integer from 0 to 100
        """
        if not s1 or not s2:
            return 0

        if len(s1) <= len(s2):
            shorter, longer = s1, s2
        else:
            shorter, longer = s2, s1

        best_ratio = 0
        for i in range(len(longer) - len(shorter) + 1):
            substring = longer[i:i + len(shorter)]
            ratio = _PurePythonFuzz.ratio(shorter, substring)
            best_ratio = max(best_ratio, ratio)

        return best_ratio
    @staticmethod
    def token_sort_ratio(s1: str, s2: str) -> int:
        """
        Calculate similarity after sorting tokens in both strings.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Token sort ratio as an integer from 0 to 100
        """
        if not s1 or not s2:
            return 0

        # Normalize and split into tokens
        tokens1 = sorted(_PurePythonFuzz._normalize_string(s1).split())
        tokens2 = sorted(_PurePythonFuzz._normalize_string(s2).split())

        # Rejoin sorted tokens
        sorted_s1 = ' '.join(tokens1)
        sorted_s2 = ' '.join(tokens2)

        return _PurePythonFuzz.ratio(sorted_s1, sorted_s2)

    @staticmethod
    def token_set_ratio(s1: str, s2: str) -> int:
        """
        Calculate similarity using set operations on tokens.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Token set ratio as an integer from 0 to 100
        """
        if not s1 or not s2:
            return 0

        # Normalize and split into tokens
        tokens1 = set(_PurePythonFuzz._normalize_string(s1).split())
        tokens2 = set(_PurePythonFuzz._normalize_string(s2).split())

        # Find intersection and differences
        intersection = tokens1 & tokens2
        diff1 = tokens1 - tokens2
        diff2 = tokens2 - tokens1

        # Create sorted strings for comparison
        sorted_intersection = ' '.join(sorted(intersection))
        sorted_diff1 = ' '.join(sorted(diff1))
        sorted_diff2 = ' '.join(sorted(diff2))

        # Combine strings for comparison
        combined1 = f"{sorted_intersection} {sorted_diff1}".strip()
        combined2 = f"{sorted_intersection} {sorted_diff2}".strip()

        if not combined1 and not combined2:
            return 100
        if not combined1 or not combined2:
            return 0

        return _PurePythonFuzz.ratio(combined1, combined2)
    @staticmethod
    def partial_token_sort_ratio(s1: str, s2: str) -> int:
        """
        Calculate partial similarity after sorting tokens.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Partial token sort ratio as an integer from 0 to 100
        """
        if not s1 or not s2:
            return 0

        # Normalize and split into tokens
        tokens1 = sorted(_PurePythonFuzz._normalize_string(s1).split())
        tokens2 = sorted(_PurePythonFuzz._normalize_string(s2).split())

        # Rejoin sorted tokens
        sorted_s1 = ' '.join(tokens1)
        sorted_s2 = ' '.join(tokens2)

        return _PurePythonFuzz.partial_ratio(sorted_s1, sorted_s2)

    @staticmethod
    def partial_token_set_ratio(s1: str, s2: str) -> int:
        """
        Calculate partial similarity using set operations on tokens.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Partial token set ratio as an integer from 0 to 100
        """
        if not s1 or not s2:
            return 0

        # Normalize and split into tokens
        tokens1 = set(_PurePythonFuzz._normalize_string(s1).split())
        tokens2 = set(_PurePythonFuzz._normalize_string(s2).split())

        # Find intersection and differences
        intersection = tokens1 & tokens2
        diff1 = tokens1 - tokens2
        diff2 = tokens2 - tokens1

        # Create sorted strings for comparison
        sorted_intersection = ' '.join(sorted(intersection))
        sorted_diff1 = ' '.join(sorted(diff1))
        sorted_diff2 = ' '.join(sorted(diff2))

        # Combine strings for comparison
        combined1 = f"{sorted_intersection} {sorted_diff1}".strip()
        combined2 = f"{sorted_intersection} {sorted_diff2}".strip()

        if not combined1 and not combined2:
            return 100
        if not combined1 or not combined2:
            return 0

        return _PurePythonFuzz.partial_ratio(combined1, combined2)

class FuzzyMatcher:
    """
    Fuzzy string matching class with the same API as thefuzz.fuzz.

    This class automatically uses thefuzz if available, otherwise falls back
    to a pure Python implementation.
    """

    def __init__(self):
        """Initialize the fuzzy matcher with the appropriate backend."""
        if THEFUZZ_AVAILABLE and _fuzz_impl is not None:
            self._impl = _fuzz_impl
            self._backend = "thefuzz"
        else:
            self._impl = _PurePythonFuzz
            self._backend = "pure_python"

        logger.debug(f"FuzzyMatcher initialized with backend: {self._backend}")

    @property
    def backend(self) -> str:
        """Get the name of the backend being used."""
        return self._backend

    def ratio(self, s1: str, s2: str) -> int:
        """
        Calculate the similarity ratio between two strings.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Similarity ratio as an integer from 0 to 100
        """
        try:
            return self._impl.ratio(s1, s2)
        except Exception as e:
            logger.warning(f"Error in ratio calculation: {e}")
            return 0

    def partial_ratio(self, s1: str, s2: str) -> int:
        """
        Calculate the partial similarity ratio between two strings.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Partial similarity ratio as an integer from 0 to 100
        """
        try:
            return self._impl.partial_ratio(s1, s2)
        except Exception as e:
            logger.warning(f"Error in partial_ratio calculation: {e}")
            return 0

    def token_sort_ratio(self, s1: str, s2: str) -> int:
        """
        Calculate similarity after sorting tokens in both strings.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Token sort ratio as an integer from 0 to 100
        """
        try:
            return self._impl.token_sort_ratio(s1, s2)
        except Exception as e:
            logger.warning(f"Error in token_sort_ratio calculation: {e}")
            return 0

    def token_set_ratio(self, s1: str, s2: str) -> int:
        """
        Calculate similarity using set operations on tokens.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Token set ratio as an integer from 0 to 100
        """
        try:
            return self._impl.token_set_ratio(s1, s2)
        except Exception as e:
            logger.warning(f"Error in token_set_ratio calculation: {e}")
            return 0

    def partial_token_sort_ratio(self, s1: str, s2: str) -> int:
        """
        Calculate partial similarity after sorting tokens.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Partial token sort ratio as an integer from 0 to 100
        """
        try:
            return self._impl.partial_token_sort_ratio(s1, s2)
        except Exception as e:
            logger.warning(f"Error in partial_token_sort_ratio calculation: {e}")
            return 0

    def partial_token_set_ratio(self, s1: str, s2: str) -> int:
        """
        Calculate partial similarity using set operations on tokens.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Partial token set ratio as an integer from 0 to 100
        """
        try:
            return self._impl.partial_token_set_ratio(s1, s2)
        except Exception as e:
            logger.warning(f"Error in partial_token_set_ratio calculation: {e}")
            return 0

    def best_ratio(self, s1: str, s2: str) -> int:
        """
        Get the best ratio from all available methods.

        Args:
            s1: First string
            s2: Second string

        Returns:
            Best similarity ratio as an integer from 0 to 100
        """
        ratios = [
            self.ratio(s1, s2),
            self.partial_ratio(s1, s2),
            self.token_sort_ratio(s1, s2),
            self.token_set_ratio(s1, s2),
            self.partial_token_sort_ratio(s1, s2),
            self.partial_token_set_ratio(s1, s2),
        ]
        return max(ratios)

# Create a default instance for convenience
fuzz = FuzzyMatcher()

# Export the functions for drop-in replacement of thefuzz.fuzz
ratio = fuzz.ratio
partial_ratio = fuzz.partial_ratio
token_sort_ratio = fuzz.token_sort_ratio
token_set_ratio = fuzz.token_set_ratio
partial_token_sort_ratio = fuzz.partial_token_sort_ratio
partial_token_set_ratio = fuzz.partial_token_set_ratio

__all__ = [
    'FuzzyMatcher',
    'fuzz',
    'ratio',
    'partial_ratio',
    'token_sort_ratio',
    'token_set_ratio',
    'partial_token_sort_ratio',
    'partial_token_set_ratio',
    'THEFUZZ_AVAILABLE',
]
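Because `fuzz` is built once at import time, call sites (like the provider import swap at the top of this diff) never branch on availability themselves. A quick usage sketch, assuming the module imports as shown:

from fastanime.core.utils.fuzzy import THEFUZZ_AVAILABLE, fuzz

print(THEFUZZ_AVAILABLE, fuzz.backend)  # e.g. True "thefuzz", or False "pure_python"
print(fuzz.token_set_ratio("Attack on Titan", "titan attack on"))  # 100 on either backend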
@@ -1,4 +1,4 @@
-import requests
+from httpx import get

 ANISKIP_ENDPOINT = "https://api.aniskip.com/v1/skip-times"

@@ -10,7 +10,7 @@ class AniSkip:
         cls, mal_id: int, episode_number: float | int, types=["op", "ed"]
     ):
         url = f"{ANISKIP_ENDPOINT}/{mal_id}/{episode_number}?types=op&types=ed"
-        response = requests.get(url)
+        response = get(url)
         print(response.text)
         return response.json()
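A standalone version of the request the method builds (mal_id 21 and episode 1 are placeholder values):

from httpx import get

ANISKIP_ENDPOINT = "https://api.aniskip.com/v1/skip-times"
# equivalent to the f-string URL above; httpx expands list params to repeated keys
response = get(f"{ANISKIP_ENDPOINT}/21/1", params={"types": ["op", "ed"]})
print(response.json())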
@@ -1,13 +1,18 @@
-import requests
-from yt_dlp.utils.networking import random_user_agent
+from httpx import Client
+from ....core.utils.networking import random_user_agent


 class MangaProvider:
-    session: requests.Session
+    session: Client

     USER_AGENT = random_user_agent()
     HEADERS = {}

     def __init__(self) -> None:
-        self.session = requests.session()
-        self.session.headers.update({"User-Agent": self.USER_AGENT, **self.HEADERS})
+        self.session = Client(
+            headers={
+                "User-Agent": self.USER_AGENT,
+                **self.HEADERS,
+            },
+            timeout=10,
+        )
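httpx.Client carries default headers and a timeout much like requests.Session; a self-contained sketch of the new construction (the URL is a placeholder):

from httpx import Client

session = Client(headers={"User-Agent": "fastanime user"}, timeout=10)
response = session.get("https://example.org/")  # placeholder URL
print(response.status_code)
session.close()  # a Client should be closed explicitly or used as a context manager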
@@ -1,6 +1,6 @@
 import logging

-from requests import get
+from httpx import get

 logger = logging.getLogger(__name__)
@@ -9,7 +9,10 @@ Python's built-in html.parser or lxml for better performance when available.
 import logging
 import re
 from html.parser import HTMLParser as BaseHTMLParser
-from typing import Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+    from lxml import etree

 logger = logging.getLogger(__name__)
@@ -54,7 +57,7 @@ class HTMLParser:
         """Initialize the HTML parser with configuration."""
         self.config = config or HTMLParserConfig()

-    def parse(self, html_content: str) -> Union[etree._Element, 'ParsedHTML']:
+    def parse(self, html_content: str) -> Union[Any, 'ParsedHTML']:
         """
         Parse HTML content and return a parsed tree.

@@ -69,7 +72,7 @@ class HTMLParser:
         else:
             return self._parse_with_builtin(html_content)

-    def _parse_with_lxml(self, html_content: str) -> etree._Element:
+    def _parse_with_lxml(self, html_content: str) -> Any:
         """Parse HTML using lxml."""
         try:
             # Use lxml's HTML parser which is more lenient
@@ -230,7 +233,7 @@ def get_element_by_id(element_id: str, html_content: str) -> Optional[str]:
     """
     parsed = _default_parser.parse(html_content)

-    if _default_parser.config.use_lxml:
+    if _default_parser.config.use_lxml and HAS_LXML:
         try:
             element = parsed.xpath(f'//*[@id="{element_id}"]')
             if element:
@@ -259,7 +262,7 @@ def get_element_by_tag(tag_name: str, html_content: str) -> Optional[str]:
     """
     parsed = _default_parser.parse(html_content)

-    if _default_parser.config.use_lxml:
+    if _default_parser.config.use_lxml and HAS_LXML:
         try:
             elements = parsed.xpath(f'//{tag_name}')
             if elements:
@@ -288,7 +291,7 @@ def get_element_by_class(class_name: str, html_content: str) -> Optional[str]:
     """
     parsed = _default_parser.parse(html_content)

-    if _default_parser.config.use_lxml:
+    if _default_parser.config.use_lxml and HAS_LXML:
         try:
             elements = parsed.xpath(f'//*[contains(@class, "{class_name}")]')
             if elements:
@@ -318,7 +321,7 @@ def get_elements_by_tag(tag_name: str, html_content: str) -> List[str]:
     parsed = _default_parser.parse(html_content)
     results = []

-    if _default_parser.config.use_lxml:
+    if _default_parser.config.use_lxml and HAS_LXML:
         try:
             elements = parsed.xpath(f'//{tag_name}')
             for element in elements:
@@ -347,7 +350,7 @@ def get_elements_by_class(class_name: str, html_content: str) -> List[str]:
    parsed = _default_parser.parse(html_content)
    results = []

-    if _default_parser.config.use_lxml:
+    if _default_parser.config.use_lxml and HAS_LXML:
         try:
             elements = parsed.xpath(f'//*[contains(@class, "{class_name}")]')
             for element in elements:
@@ -396,7 +399,7 @@ def get_element_text_and_html_by_tag(tag_name: str, html_content: str) -> Tuple[
     """
     parsed = _default_parser.parse(html_content)

-    if _default_parser.config.use_lxml:
+    if _default_parser.config.use_lxml and HAS_LXML:
         try:
             elements = parsed.xpath(f'//{tag_name}')
             if elements:
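The new guard references a HAS_LXML flag whose definition sits outside the shown hunks; presumably it follows the same try/except pattern as the fuzzy module. A sketch of that assumption:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from lxml import etree  # for type checkers only, never imported at runtime

# assumed definition of HAS_LXML; not visible in the hunks above
try:
    import lxml  # noqa: F401
    HAS_LXML = True
except ImportError:
    HAS_LXML = False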
@@ -6,29 +6,30 @@ license = "UNLICENSE"
 readme = "README.md"
 requires-python = ">=3.10"
 dependencies = [
     "beautifulsoup4>=4.13.4",
     "click>=8.1.7",
     "httpx>=0.28.1",
     "inquirerpy>=0.3.4",
-    "libtorrent>=2.0.11",
-    "lxml>=6.0.0",
     "pycryptodome>=3.21.0",
     "pydantic>=2.11.7",
-    "pypresence>=4.3.0",
-    "requests>=2.32.3",
     "rich>=13.9.2",
-    "thefuzz>=0.22.1",
     "yt-dlp[default]>=2024.10.7",
 ]

 [project.scripts]
 fastanime = 'fastanime:Cli'

 [project.optional-dependencies]
-standard = ["fastapi[standard]>=0.115.0", "mpv>=1.0.7", "plyer>=2.1.0"]
+api = ["fastapi[standard]>=0.115.0"]
+standard = [
+    "mpv>=1.0.7",
+    "plyer>=2.1.0",
+    "libtorrent>=2.0.11",
+    "lxml>=6.0.0",
+    "pypresence>=4.3.0",
+    "thefuzz>=0.22.1",
+]
+notifications = ["plyer>=2.1.0"]
+mpv = ["mpv>=1.0.7"]
+torrent = ["libtorrent>=2.0.11"]
+lxml = ["lxml>=6.0.0"]
+discord = ["pypresence>=4.3.0"]

 [build-system]
 requires = ["hatchling"]
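With this split, a bare install stays lean and features opt in per extra (assuming the published package name matches the project name):

pip install fastanime
pip install "fastanime[standard]"      # mpv, plyer, libtorrent, lxml, pypresence, thefuzz
pip install "fastanime[mpv,discord]"   # or pick extras individually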