feat: add debug mode for providers

This commit is contained in:
Benex254
2024-09-22 20:30:41 +03:00
parent 83cbef40f6
commit 1a87375ccd
10 changed files with 610 additions and 630 deletions

View File

@@ -66,13 +66,9 @@ class AnimeProvider:
[TODO:return]
"""
anime_provider = self.anime_provider
try:
results = anime_provider.search_for_anime(
user_query, translation_type, nsfw, unknown
)
except Exception as e:
logger.error(f"[ANIMEPROVIDER-ERROR]: {e}")
results = None
results = anime_provider.search_for_anime(
user_query, translation_type, nsfw, unknown
)
return results
@@ -90,12 +86,8 @@ class AnimeProvider:
[TODO:return]
"""
anime_provider = self.anime_provider
try:
results = anime_provider.get_anime(anime_id)
except Exception as e:
logger.error(f"[ANIMEPROVIDER-ERROR]: {e}")
results = anime_provider.get_anime(anime_id)
results = None
return results
def get_episode_streams(
@@ -116,12 +108,5 @@ class AnimeProvider:
[TODO:return]
"""
anime_provider = self.anime_provider
try:
results = anime_provider.get_episode_streams(
anime, episode, translation_type
)
except Exception as e:
logger.error(f"[ANIMEPROVIDER-ERROR]: {e}")
results = None
results = anime_provider.get_episode_streams(anime, episode, translation_type)
return results

View File

@@ -1,11 +1,10 @@
from .allanime.constants import SERVERS_AVAILABLE as ALLANIME_SERVERS
from .animepahe.constants import SERVERS_AVAILABLE as ANIMEPAHESERVERS
from .aniwatch.constants import SERVERS_AVAILABLE as ANIWATCHSERVERS
from .hianime.constants import SERVERS_AVAILABLE as ANIWATCHSERVERS
anime_sources = {
"allanime": "api.AllAnimeAPI",
"animepahe": "api.AnimePaheApi",
"aniwatch": "api.AniWatchApi",
"aniwave": "api.AniWaveApi",
"hianime": "api.HiAnimeApi",
}
SERVERS_AVAILABLE = [*ALLANIME_SERVERS, *ANIMEPAHESERVERS, *ANIWATCHSERVERS]

View File

@@ -7,9 +7,8 @@ import json
import logging
from typing import TYPE_CHECKING
from requests.exceptions import Timeout
from ...anime_provider.base_provider import AnimeProvider
from ..decorators import debug_provider
from ..utils import give_random_quality, one_digit_symmetric_xor
from .constants import ALLANIME_API_ENDPOINT, ALLANIME_BASE, ALLANIME_REFERER
from .gql_queries import ALLANIME_EPISODES_GQL, ALLANIME_SEARCH_GQL, ALLANIME_SHOW_GQL
@@ -42,29 +41,21 @@ class AllAnimeAPI(AnimeProvider):
Returns:
[TODO:return]
"""
try:
response = self.session.get(
self.api_endpoint,
params={
"variables": json.dumps(variables),
"query": query,
},
timeout=10,
)
if response.ok:
return response.json()["data"]
else:
logger.error("[ALLANIME-ERROR]: ", response.text)
return {}
except Timeout:
logger.error(
"[ALLANIME-ERROR]: Timeout exceeded this could mean allanime is down or you have lost internet connection"
)
return {}
except Exception as e:
logger.error(f"[ALLANIME-ERROR]: {e}")
response = self.session.get(
self.api_endpoint,
params={
"variables": json.dumps(variables),
"query": query,
},
timeout=10,
)
if response.ok:
return response.json()["data"]
else:
logger.error("[ALLANIME-ERROR]: ", response.text)
return {}
@debug_provider("ALLANIME")
def search_for_anime(
self,
user_query: str,
@@ -97,29 +88,25 @@ class AllAnimeAPI(AnimeProvider):
"translationtype": translationtype,
"countryorigin": countryorigin,
}
try:
search_results = self._fetch_gql(ALLANIME_SEARCH_GQL, variables)
page_info = search_results["shows"]["pageInfo"]
results = []
for result in search_results["shows"]["edges"]:
normalized_result = {
"id": result["_id"],
"title": result["name"],
"type": result["__typename"],
"availableEpisodes": result["availableEpisodes"],
}
results.append(normalized_result)
normalized_search_results = {
"pageInfo": page_info,
"results": results,
search_results = self._fetch_gql(ALLANIME_SEARCH_GQL, variables)
page_info = search_results["shows"]["pageInfo"]
results = []
for result in search_results["shows"]["edges"]:
normalized_result = {
"id": result["_id"],
"title": result["name"],
"type": result["__typename"],
"availableEpisodes": result["availableEpisodes"],
}
return normalized_search_results
results.append(normalized_result)
except Exception as e:
logger.error(f"[ALLANIME-ERROR]: {e}")
return {}
normalized_search_results = {
"pageInfo": page_info,
"results": results,
}
return normalized_search_results
@debug_provider("ALLANIME")
def get_anime(self, allanime_show_id: str):
"""get an anime details given its id
@@ -130,23 +117,20 @@ class AllAnimeAPI(AnimeProvider):
[TODO:return]
"""
variables = {"showId": allanime_show_id}
try:
anime = self._fetch_gql(ALLANIME_SHOW_GQL, variables)
id: str = anime["show"]["_id"]
title: str = anime["show"]["name"]
availableEpisodesDetail = anime["show"]["availableEpisodesDetail"]
type = anime.get("__typename")
normalized_anime = {
"id": id,
"title": title,
"availableEpisodesDetail": availableEpisodesDetail,
"type": type,
}
return normalized_anime
except Exception as e:
logger.error(f"[ALLANIME-ERROR]: {e}")
return {}
anime = self._fetch_gql(ALLANIME_SHOW_GQL, variables)
id: str = anime["show"]["_id"]
title: str = anime["show"]["name"]
availableEpisodesDetail = anime["show"]["availableEpisodesDetail"]
type = anime.get("__typename")
normalized_anime = {
"id": id,
"title": title,
"availableEpisodesDetail": availableEpisodesDetail,
"type": type,
}
return normalized_anime
@debug_provider("ALLANIME")
def _get_anime_episode(
self, allanime_show_id: str, episode_string: str, translation_type: str = "sub"
) -> "AllAnimeEpisode | dict":
@@ -165,13 +149,10 @@ class AllAnimeAPI(AnimeProvider):
"translationType": translation_type,
"episodeString": episode_string,
}
try:
episode = self._fetch_gql(ALLANIME_EPISODES_GQL, variables)
return episode["episode"]
except Exception as e:
logger.error(f"[ALLANIME-ERROR]: {e}")
return {}
episode = self._fetch_gql(ALLANIME_EPISODES_GQL, variables)
return episode["episode"]
@debug_provider("ALLANIME")
def get_episode_streams(self, anime, episode_number: str, translation_type="sub"):
"""get the streams of an episode
@@ -191,125 +172,117 @@ class AllAnimeAPI(AnimeProvider):
return []
embeds = allanime_episode["sourceUrls"]
try:
for embed in embeds:
try:
# filter the working streams no need to get all since the others are mostly hsl
# TODO: should i just get all the servers and handle the hsl??
if embed.get("sourceName", "") not in (
# priorities based on death note
"Sak", # 7
"S-mp4", # 7.9
"Luf-mp4", # 7.7
"Default", # 8.5
"Yt-mp4", # 7.9
"Kir", # NA
# "Vid-mp4" # 4
# "Ok", # 3.5
# "Ss-Hls", # 5.5
# "Mp4", # 4
):
continue
url = embed.get("sourceUrl")
#
if not url:
continue
if url.startswith("--"):
url = url[2:]
url = one_digit_symmetric_xor(56, url)
if "tools.fast4speed.rsvp" in url:
yield {
"server": "Yt",
"episode_title": f'{anime["title"]}; Episode {episode_number}',
"headers": {"Referer": f"https://{ALLANIME_BASE}/"},
"subtitles": [],
"links": [
{
"link": url,
"quality": "1080",
}
],
@debug_provider("ALLANIME")
def _get_server(embed):
# filter the working streams no need to get all since the others are mostly hsl
# TODO: should i just get all the servers and handle the hsl??
if embed.get("sourceName", "") not in (
# priorities based on death note
"Sak", # 7
"S-mp4", # 7.9
"Luf-mp4", # 7.7
"Default", # 8.5
"Yt-mp4", # 7.9
"Kir", # NA
# "Vid-mp4" # 4
# "Ok", # 3.5
# "Ss-Hls", # 5.5
# "Mp4", # 4
):
return
url = embed.get("sourceUrl")
#
if not url:
return
if url.startswith("--"):
url = url[2:]
url = one_digit_symmetric_xor(56, url)
if "tools.fast4speed.rsvp" in url:
return {
"server": "Yt",
"episode_title": f'{anime["title"]}; Episode {episode_number}',
"headers": {"Referer": f"https://{ALLANIME_BASE}/"},
"subtitles": [],
"links": [
{
"link": url,
"quality": "1080",
}
continue
],
}
# get the stream url for an episode of the defined source names
embed_url = (
f"https://{ALLANIME_BASE}{url.replace('clock', 'clock.json')}"
)
resp = self.session.get(
embed_url,
timeout=10,
)
# get the stream url for an episode of the defined source names
embed_url = f"https://{ALLANIME_BASE}{url.replace('clock', 'clock.json')}"
resp = self.session.get(
embed_url,
timeout=10,
)
if resp.ok:
match embed["sourceName"]:
case "Luf-mp4":
logger.debug("allanime:Found streams from gogoanime")
yield {
"server": "gogoanime",
"headers": {},
"subtitles": [],
"episode_title": (
allanime_episode["notes"] or f'{anime["title"]}'
)
+ f"; Episode {episode_number}",
"links": give_random_quality(resp.json()["links"]),
}
case "Kir":
logger.debug("allanime:Found streams from wetransfer")
yield {
"server": "wetransfer",
"headers": {},
"subtitles": [],
"episode_title": (
allanime_episode["notes"] or f'{anime["title"]}'
)
+ f"; Episode {episode_number}",
"links": give_random_quality(resp.json()["links"]),
}
case "S-mp4":
logger.debug("allanime:Found streams from sharepoint")
yield {
"server": "sharepoint",
"headers": {},
"subtitles": [],
"episode_title": (
allanime_episode["notes"] or f'{anime["title"]}'
)
+ f"; Episode {episode_number}",
"links": give_random_quality(resp.json()["links"]),
}
case "Sak":
logger.debug("allanime:Found streams from dropbox")
yield {
"server": "dropbox",
"headers": {},
"subtitles": [],
"episode_title": (
allanime_episode["notes"] or f'{anime["title"]}'
)
+ f"; Episode {episode_number}",
"links": give_random_quality(resp.json()["links"]),
}
case "Default":
logger.debug("allanime:Found streams from wixmp")
yield {
"server": "wixmp",
"headers": {},
"subtitles": [],
"episode_title": (
allanime_episode["notes"] or f'{anime["title"]}'
)
+ f"; Episode {episode_number}",
"links": give_random_quality(resp.json()["links"]),
}
except Timeout:
logger.error(
"[ALLANIME-ERROR]: Timeout has been exceeded this could mean allanime is down or you have lost internet connection"
)
except Exception as e:
logger.error(f"[ALLANIME-ERROR]: {e}")
except Exception as e:
logger.error(f"[ALLANIME-ERROR]: {e}")
return []
if resp.ok:
match embed["sourceName"]:
case "Luf-mp4":
logger.debug("allanime:Found streams from gogoanime")
return {
"server": "gogoanime",
"headers": {},
"subtitles": [],
"episode_title": (
allanime_episode["notes"] or f'{anime["title"]}'
)
+ f"; Episode {episode_number}",
"links": give_random_quality(resp.json()["links"]),
}
case "Kir":
logger.debug("allanime:Found streams from wetransfer")
return {
"server": "wetransfer",
"headers": {},
"subtitles": [],
"episode_title": (
allanime_episode["notes"] or f'{anime["title"]}'
)
+ f"; Episode {episode_number}",
"links": give_random_quality(resp.json()["links"]),
}
case "S-mp4":
logger.debug("allanime:Found streams from sharepoint")
return {
"server": "sharepoint",
"headers": {},
"subtitles": [],
"episode_title": (
allanime_episode["notes"] or f'{anime["title"]}'
)
+ f"; Episode {episode_number}",
"links": give_random_quality(resp.json()["links"]),
}
case "Sak":
logger.debug("allanime:Found streams from dropbox")
return {
"server": "dropbox",
"headers": {},
"subtitles": [],
"episode_title": (
allanime_episode["notes"] or f'{anime["title"]}'
)
+ f"; Episode {episode_number}",
"links": give_random_quality(resp.json()["links"]),
}
case "Default":
logger.debug("allanime:Found streams from wixmp")
return {
"server": "wixmp",
"headers": {},
"subtitles": [],
"episode_title": (
allanime_episode["notes"] or f'{anime["title"]}'
)
+ f"; Episode {episode_number}",
"links": give_random_quality(resp.json()["links"]),
}
for embed in embeds:
if server := _get_server(embed):
yield server

View File

@@ -11,6 +11,7 @@ from yt_dlp.utils import (
)
from ..base_provider import AnimeProvider
from ..decorators import debug_provider
from .constants import (
ANIMEPAHE_BASE,
ANIMEPAHE_ENDPOINT,
@@ -27,211 +28,199 @@ logger = logging.getLogger(__name__)
KWIK_RE = re.compile(r"Player\|(.+?)'")
# TODO: hack this to completion
class AnimePaheApi(AnimeProvider):
search_page: "AnimePaheSearchPage"
anime: "AnimePaheAnimePage"
HEADERS = REQUEST_HEADERS
@debug_provider("ANIMEPAHE")
def search_for_anime(self, user_query: str, *args):
try:
url = f"{ANIMEPAHE_ENDPOINT}m=search&q={user_query}"
url = f"{ANIMEPAHE_ENDPOINT}m=search&q={user_query}"
response = self.session.get(
url,
)
if not response.ok:
return
data: "AnimePaheSearchPage" = response.json()
self.search_page = data
return {
"pageInfo": {
"total": data["total"],
"perPage": data["per_page"],
"currentPage": data["current_page"],
},
"results": [
{
"availableEpisodes": list(range(result["episodes"])),
"id": result["session"],
"title": result["title"],
"type": result["type"],
"year": result["year"],
"score": result["score"],
"status": result["status"],
"season": result["season"],
"poster": result["poster"],
}
for result in data["data"]
],
}
@debug_provider("ANIMEPAHE")
def get_anime(self, session_id: str, *args):
page = 1
anime_result: "AnimeSearchResult" = [
anime
for anime in self.search_page["data"]
if anime["session"] == session_id
][0]
data: "AnimePaheAnimePage" = {} # pyright:ignore
url = f"{ANIMEPAHE_ENDPOINT}m=release&id={session_id}&sort=episode_asc&page={page}"
def _pages_loader(
url,
page,
):
response = self.session.get(
url,
)
if not response.ok:
return
data: "AnimePaheSearchPage" = response.json()
self.search_page = data
return {
"pageInfo": {
"total": data["total"],
"perPage": data["per_page"],
"currentPage": data["current_page"],
},
"results": [
{
"availableEpisodes": list(range(result["episodes"])),
"id": result["session"],
"title": result["title"],
"type": result["type"],
"year": result["year"],
"score": result["score"],
"status": result["status"],
"season": result["season"],
"poster": result["poster"],
}
for result in data["data"]
],
}
except Exception as e:
logger.error(f"[ANIMEPAHE-ERROR]: {e}")
return {}
def get_anime(self, session_id: str, *args):
page = 1
try:
anime_result: "AnimeSearchResult" = [
anime
for anime in self.search_page["data"]
if anime["session"] == session_id
][0]
data: "AnimePaheAnimePage" = {} # pyright:ignore
url = f"{ANIMEPAHE_ENDPOINT}m=release&id={session_id}&sort=episode_asc&page={page}"
def _pages_loader(
url,
page,
):
response = self.session.get(
url,
)
if response.ok:
if not data:
data.update(response.json())
else:
if ep_data := response.json().get("data"):
data["data"].extend(ep_data)
if response.json()["next_page_url"]:
# TODO: Refine this
time.sleep(
random.choice(
[
0.25,
0.1,
0.5,
0.75,
1,
]
)
)
page += 1
url = f"{ANIMEPAHE_ENDPOINT}m=release&id={session_id}&sort=episode_asc&page={page}"
_pages_loader(
url,
page,
if response.ok:
if not data:
data.update(response.json())
else:
if ep_data := response.json().get("data"):
data["data"].extend(ep_data)
if response.json()["next_page_url"]:
# TODO: Refine this
time.sleep(
random.choice(
[
0.25,
0.1,
0.5,
0.75,
1,
]
)
)
page += 1
url = f"{ANIMEPAHE_ENDPOINT}m=release&id={session_id}&sort=episode_asc&page={page}"
_pages_loader(
url,
page,
)
_pages_loader(
url,
page,
)
_pages_loader(
url,
page,
)
if not data:
return {}
self.anime = data # pyright:ignore
episodes = list(map(str, [episode["episode"] for episode in data["data"]]))
title = ""
return {
"id": session_id,
"title": anime_result["title"],
"year": anime_result["year"],
"season": anime_result["season"],
"poster": anime_result["poster"],
"score": anime_result["score"],
"availableEpisodesDetail": {
"sub": episodes,
"dub": episodes,
"raw": episodes,
},
"episodesInfo": [
{
"title": f"{episode['title'] or title};{episode['episode']}",
"episode": episode["episode"],
"id": episode["session"],
"translation_type": episode["audio"],
"duration": episode["duration"],
"poster": episode["snapshot"],
}
for episode in data["data"]
],
}
except Exception as e:
logger.error(f"[ANIMEPAHE-ERROR]: {e}")
if not data:
return {}
self.anime = data # pyright:ignore
episodes = list(map(str, [episode["episode"] for episode in data["data"]]))
title = ""
return {
"id": session_id,
"title": anime_result["title"],
"year": anime_result["year"],
"season": anime_result["season"],
"poster": anime_result["poster"],
"score": anime_result["score"],
"availableEpisodesDetail": {
"sub": episodes,
"dub": episodes,
"raw": episodes,
},
"episodesInfo": [
{
"title": f"{episode['title'] or title};{episode['episode']}",
"episode": episode["episode"],
"id": episode["session"],
"translation_type": episode["audio"],
"duration": episode["duration"],
"poster": episode["snapshot"],
}
for episode in data["data"]
],
}
@debug_provider("ANIMEPAHE")
def get_episode_streams(self, anime, episode_number: str, translation_type, *args):
try:
# extract episode details from memory
episode = [
episode
for episode in self.anime["data"]
if float(episode["episode"]) == float(episode_number)
]
# extract episode details from memory
episode = [
episode
for episode in self.anime["data"]
if float(episode["episode"]) == float(episode_number)
]
if not episode:
logger.error(
f"[ANIMEPAHE-ERROR]: episode {episode_number} doesn't exist"
if not episode:
logger.error(f"[ANIMEPAHE-ERROR]: episode {episode_number} doesn't exist")
return []
episode = episode[0]
anime_id = anime["id"]
# fetch the episode page
url = f"{ANIMEPAHE_BASE}/play/{anime_id}/{episode['session']}"
response = self.session.get(url)
# get the element containing links to juicy streams
c = get_element_by_id("resolutionMenu", response.text)
resolutionMenuItems = get_elements_html_by_class("dropdown-item", c)
# convert the elements containing embed links to a neat dict containing:
# data-src
# data-audio
# data-resolution
res_dicts = [extract_attributes(item) for item in resolutionMenuItems]
# get the episode title
episode_title = (
f"{episode['title'] or anime['title']}; Episode {episode['episode']}"
)
# get all links
streams = {
"server": "kwik",
"links": [],
"episode_title": episode_title,
"subtitles": [],
"headers": {},
}
for res_dict in res_dicts:
# get embed url
embed_url = res_dict["data-src"]
data_audio = "dub" if res_dict["data-audio"] == "eng" else "sub"
# filter streams by translation_type
if data_audio != translation_type:
continue
if not embed_url:
logger.warn(
"[ANIMEPAHE-WARN]: embed url not found please report to the developers"
)
return []
episode = episode[0]
anime_id = anime["id"]
# fetch the episode page
url = f"{ANIMEPAHE_BASE}/play/{anime_id}/{episode['session']}"
response = self.session.get(url)
# get the element containing links to juicy streams
c = get_element_by_id("resolutionMenu", response.text)
resolutionMenuItems = get_elements_html_by_class("dropdown-item", c)
# convert the elements containing embed links to a neat dict containing:
# data-src
# data-audio
# data-resolution
res_dicts = [extract_attributes(item) for item in resolutionMenuItems]
# get the episode title
episode_title = (
f"{episode['title'] or anime['title']}; Episode {episode['episode']}"
# get embed page
embed_response = self.session.get(
embed_url, headers={"User-Agent": self.USER_AGENT, **SERVER_HEADERS}
)
# get all links
streams = {
"server": "kwik",
"links": [],
"episode_title": episode_title,
"subtitles": [],
"headers": {},
}
for res_dict in res_dicts:
# get embed url
embed_url = res_dict["data-src"]
data_audio = "dub" if res_dict["data-audio"] == "eng" else "sub"
# filter streams by translation_type
if data_audio != translation_type:
continue
if not response.ok:
continue
embed_page = embed_response.text
if not embed_url:
logger.warn(
"[ANIMEPAHE-WARN]: embed url not found please report to the developers"
)
return []
# get embed page
embed_response = self.session.get(
embed_url, headers={"User-Agent": self.USER_AGENT, **SERVER_HEADERS}
)
if not response.ok:
continue
embed_page = embed_response.text
decoded_js = process_animepahe_embed_page(embed_page)
if not decoded_js:
logger.error("[ANIMEPAHE-ERROR]: failed to decode embed page")
return
juicy_stream = JUICY_STREAM_REGEX.search(decoded_js)
if not juicy_stream:
logger.error("[ANIMEPAHE-ERROR]: failed to find juicy stream")
return
juicy_stream = juicy_stream.group(1)
# add the link
streams["links"].append(
{
"quality": res_dict["data-resolution"],
"translation_type": data_audio,
"link": juicy_stream,
}
)
yield streams
except Exception as e:
logger.error(f"[ANIMEPAHE-ERROR]: {e}")
decoded_js = process_animepahe_embed_page(embed_page)
if not decoded_js:
logger.error("[ANIMEPAHE-ERROR]: failed to decode embed page")
return
juicy_stream = JUICY_STREAM_REGEX.search(decoded_js)
if not juicy_stream:
logger.error("[ANIMEPAHE-ERROR]: failed to find juicy stream")
return
juicy_stream = juicy_stream.group(1)
# add the link
streams["links"].append(
{
"quality": res_dict["data-resolution"],
"translation_type": data_audio,
"link": juicy_stream,
}
)
yield streams

View File

@@ -1,236 +0,0 @@
import logging
import re
from html.parser import HTMLParser
from itertools import cycle
from urllib.parse import quote_plus
from yt_dlp.utils import (
clean_html,
extract_attributes,
get_element_by_class,
get_element_html_by_class,
get_elements_by_class,
get_elements_html_by_class,
)
from ..base_provider import AnimeProvider
from ..utils import give_random_quality
from .constants import SERVERS_AVAILABLE
from .types import AniWatchStream
logger = logging.getLogger(__name__)
LINK_TO_STREAMS_REGEX = re.compile(r".*://(.*)/embed-(2|4|6)/e-([0-9])/(.*)\?.*")
IMAGE_HTML_ELEMENT_REGEX = re.compile(r"<img.*?>")
class ParseAnchorAndImgTag(HTMLParser):
def __init__(self):
super().__init__()
self.img_tag = None
self.a_tag = None
def handle_starttag(self, tag, attrs):
if tag == "img":
self.img_tag = {attr[0]: attr[1] for attr in attrs}
if tag == "a":
self.a_tag = {attr[0]: attr[1] for attr in attrs}
class AniWatchApi(AnimeProvider):
# HEADERS = {"Referer": "https://hianime.to/home"}
def search_for_anime(self, anime_title: str, *args):
try:
query = quote_plus(anime_title)
url = f"https://hianime.to/search?keyword={query}"
response = self.session.get(url)
if not response.ok:
return
search_page = response.text
search_results_html_items = get_elements_by_class("flw-item", search_page)
results = []
for search_results_html_item in search_results_html_items:
film_poster_html = get_element_by_class(
"film-poster", search_results_html_item
)
if not film_poster_html:
continue
# get availableEpisodes
episodes_html = get_element_html_by_class("tick-sub", film_poster_html)
episodes = clean_html(episodes_html) or 12
# get anime id and poster image url
parser = ParseAnchorAndImgTag()
parser.feed(film_poster_html)
image_data = parser.img_tag
anime_link_data = parser.a_tag
if not image_data or not anime_link_data:
continue
episodes = int(episodes)
# finally!!
image_link = image_data["data-src"]
anime_id = anime_link_data["data-id"]
title = anime_link_data["title"]
results.append(
{
"availableEpisodes": list(range(1, episodes)),
"id": anime_id,
"title": title,
"poster": image_link,
}
)
self.search_results = results
return {"pageInfo": {}, "results": results}
except Exception as e:
logger.error(f"[ANIWATCH-ERROR]: {e}")
def get_anime(self, aniwatch_id, *args):
try:
anime_result = {}
for anime in self.search_results:
if anime["id"] == aniwatch_id:
anime_result = anime
break
anime_url = f"https://hianime.to/ajax/v2/episode/list/{aniwatch_id}"
response = self.session.get(anime_url, timeout=10)
if response.ok:
response_json = response.json()
aniwatch_anime_page = response_json["html"]
episodes_info_container_html = get_element_html_by_class(
"ss-list", aniwatch_anime_page
)
episodes_info_html_list = get_elements_html_by_class(
"ep-item", episodes_info_container_html
)
# keys: [ data-number: episode_number, data-id: episode_id, title: episode_title , href:episode_page_url]
episodes_info_dicts = [
extract_attributes(episode_dict)
for episode_dict in episodes_info_html_list
]
episodes = [episode["data-number"] for episode in episodes_info_dicts]
self.episodes_info = [
{
"id": episode["data-id"],
"title": (
(episode["title"] or "").replace(
f"Episode {episode['data-number']}", ""
)
or anime_result["title"]
)
+ f"; Episode {episode['data-number']}",
"episode": episode["data-number"],
}
for episode in episodes_info_dicts
]
return {
"id": aniwatch_id,
"availableEpisodesDetail": {
"dub": episodes,
"sub": episodes,
"raw": episodes,
},
"poster": anime_result["poster"],
"title": anime_result["title"],
"episodes_info": self.episodes_info,
}
except Exception as e:
logger.error(f"[ANIWACTCH-ERROR]: {e}")
def get_episode_streams(self, anime, episode, translation_type, *args):
try:
episode_details = [
episode_details
for episode_details in self.episodes_info
if episode_details["episode"] == episode
]
if not episode_details:
return
episode_details = episode_details[0]
episode_url = f"https://hianime.to/ajax/v2/episode/servers?episodeId={episode_details['id']}"
response = self.session.get(episode_url)
if response.ok:
response_json = response.json()
episode_page_html = response_json["html"]
servers_containers_html = get_elements_html_by_class(
"ps__-list", episode_page_html
)
if not servers_containers_html:
return
# sub servers
try:
servers_html_sub = get_elements_html_by_class(
"server-item", servers_containers_html[0]
)
except Exception:
logger.warn("AniWatch: sub not found")
servers_html_sub = None
# dub servers
try:
servers_html_dub = get_elements_html_by_class(
"server-item", servers_containers_html[1]
)
except Exception:
logger.warn("AniWatch: dub not found")
servers_html_dub = None
if translation_type == "dub":
servers_html = servers_html_dub
else:
servers_html = servers_html_sub
if not servers_html:
return
for server_name, server_html in zip(
cycle(SERVERS_AVAILABLE), servers_html
):
try:
# keys: [ data-type: translation_type, data-id: embed_id, data-server-id: server_id ]
servers_info = extract_attributes(server_html)
embed_url = f"https://hianime.to/ajax/v2/episode/sources?id={servers_info['data-id']}"
embed_response = self.session.get(embed_url)
if embed_response.ok:
embed_json = embed_response.json()
raw_link_to_streams = embed_json["link"]
match = LINK_TO_STREAMS_REGEX.match(raw_link_to_streams)
if not match:
continue
provider_domain = match.group(1)
embed_type = match.group(2)
episode_number = match.group(3)
source_id = match.group(4)
link_to_streams = f"https://{provider_domain}/embed-{embed_type}/ajax/e-{episode_number}/getSources?id={source_id}"
link_to_streams_response = self.session.get(link_to_streams)
if link_to_streams_response.ok:
juicy_streams_json: "AniWatchStream" = (
link_to_streams_response.json()
)
yield {
"headers": {},
"subtitles": [
{
"url": track["file"],
"language": track["label"],
}
for track in juicy_streams_json["tracks"]
if track["kind"] == "captions"
],
"server": server_name,
"episode_title": episode_details["title"],
"links": give_random_quality(
[
{"link": link["file"], "type": link["type"]}
for link in juicy_streams_json["sources"]
]
),
}
except Exception as e:
logger.error(f"[ANIWATCH_ERROR]: {e}")
except Exception as e:
logger.error(f"[ANIWATCH_ERROR]: {e}")

View File

@@ -0,0 +1,39 @@
import functools
import logging
import os
logger = logging.getLogger(__name__)
def debug_provider(provider_name: str):
    """Decorator factory that shields provider calls from unexpected failures.

    When the ``FASTANIME_DEBUG`` environment variable is set, the wrapped
    function runs unprotected so exceptions propagate with a full traceback
    (useful while developing a provider).  Otherwise any exception is
    caught, logged with its traceback, and ``None`` is returned so a single
    broken provider cannot crash the application.

    Args:
        provider_name: Label identifying the provider in log records.

    Returns:
        A decorator that wraps a provider function as described above.
    """

    def _provider_function_decorator(provider_function):
        @functools.wraps(provider_function)
        def _provider_function_wrapper(*args, **kwargs):
            # Debug mode: fail loudly so the developer sees the real error.
            if os.environ.get("FASTANIME_DEBUG"):
                return provider_function(*args, **kwargs)
            try:
                return provider_function(*args, **kwargs)
            except Exception:
                # logger.exception preserves the traceback, unlike
                # logger.error(f"...: {e}") which keeps only the message.
                logger.exception(
                    "[%s@%s]", provider_name, provider_function.__name__
                )
                return None

        return _provider_function_wrapper

    return _provider_function_decorator
def ensure_internet_connection(provider_function):
    """Decorator that aborts the program when there is no internet access.

    Before invoking *provider_function*, a small probe request is sent to
    ``https://google.com``; if it cannot complete (connection error or
    timeout) the process exits with status 1 instead of letting the
    provider fail later with a confusing network traceback.

    Args:
        provider_function: The provider callable to guard.

    Returns:
        The wrapped callable.
    """

    @functools.wraps(provider_function)
    def _wrapper(*args, **kwargs):
        # Imported lazily so merely importing this module stays cheap.
        import requests

        try:
            requests.get("https://google.com", timeout=5)
        except (requests.ConnectionError, requests.Timeout):
            # A timed-out probe means the connection is unusable too; the
            # previous code let requests.Timeout propagate unhandled.
            from sys import exit

            print("You are not connected to the internet;Aborting...")
            exit(1)
        return provider_function(*args, **kwargs)

    return _wrapper

View File

@@ -0,0 +1,231 @@
import logging
import re
from html.parser import HTMLParser
from itertools import cycle
from urllib.parse import quote_plus
from yt_dlp.utils import (
clean_html,
extract_attributes,
get_element_by_class,
get_element_html_by_class,
get_elements_by_class,
get_elements_html_by_class,
)
from ..base_provider import AnimeProvider
from ..decorators import debug_provider
from ..utils import give_random_quality
from .constants import SERVERS_AVAILABLE
from .types import AniWatchStream
logger = logging.getLogger(__name__)
LINK_TO_STREAMS_REGEX = re.compile(r".*://(.*)/embed-(2|4|6)/e-([0-9])/(.*)\?.*")
IMAGE_HTML_ELEMENT_REGEX = re.compile(r"<img.*?>")
class ParseAnchorAndImgTag(HTMLParser):
    """HTML parser that records the attributes of ``<img>`` and ``<a>`` tags.

    After ``feed()`` the attribute mappings of the most recently seen
    ``<img>`` and ``<a>`` start tags are available as ``img_tag`` and
    ``a_tag`` respectively (``None`` when the tag never appeared).
    """

    def __init__(self):
        super().__init__()
        # Attribute dicts of the last tags encountered, if any.
        self.img_tag = None
        self.a_tag = None

    def handle_starttag(self, tag, attrs):
        # attrs is a list of (name, value) pairs; dict() maps it directly.
        if tag == "img":
            self.img_tag = dict(attrs)
        if tag == "a":
            self.a_tag = dict(attrs)
class HiAnimeApi(AnimeProvider):
    """Anime provider that scrapes the hianime.to site.

    The methods form a stateful pipeline and must be called in order:
    ``search_for_anime`` caches ``self.search_results``, which
    ``get_anime`` reads; ``get_anime`` caches ``self.episodes_info``,
    which ``get_episode_streams`` reads.  All public methods are wrapped
    by ``debug_provider``, so outside debug mode failures are logged and
    ``None`` is returned instead of raising.
    """

    # HEADERS = {"Referer": "https://hianime.to/home"}

    @debug_provider("ANIWATCH")
    def search_for_anime(self, anime_title: str, *args):
        """Scrape the hianime search page for *anime_title*.

        Returns a dict with an (empty) ``pageInfo`` and a ``results``
        list of normalized hit dicts, or ``None`` when the HTTP request
        fails.  Side effect: caches the results on
        ``self.search_results`` for ``get_anime``.
        """
        query = quote_plus(anime_title)
        url = f"https://hianime.to/search?keyword={query}"
        response = self.session.get(url)
        if not response.ok:
            return
        search_page = response.text
        # each ".flw-item" element appears to be one search hit
        search_results_html_items = get_elements_by_class("flw-item", search_page)
        results = []
        for search_results_html_item in search_results_html_items:
            film_poster_html = get_element_by_class(
                "film-poster", search_results_html_item
            )
            if not film_poster_html:
                continue
            # get availableEpisodes; falls back to 12 when the count badge
            # is missing or empty
            episodes_html = get_element_html_by_class("tick-sub", film_poster_html)
            episodes = clean_html(episodes_html) or 12
            # get anime id and poster image url from the <a> and <img> tags
            parser = ParseAnchorAndImgTag()
            parser.feed(film_poster_html)
            image_data = parser.img_tag
            anime_link_data = parser.a_tag
            if not image_data or not anime_link_data:
                continue
            episodes = int(episodes)
            # finally!!
            image_link = image_data["data-src"]
            anime_id = anime_link_data["data-id"]
            title = anime_link_data["title"]
            results.append(
                {
                    # NOTE(review): range(1, episodes) yields episodes-1
                    # entries — confirm whether the final episode is
                    # deliberately excluded
                    "availableEpisodes": list(range(1, episodes)),
                    "id": anime_id,
                    "title": title,
                    "poster": image_link,
                }
            )
        # cache so get_anime can look metadata up by id later
        self.search_results = results
        return {"pageInfo": {}, "results": results}

    @debug_provider("ANIWATCH")
    def get_anime(self, aniwatch_id, *args):
        """Fetch the episode list for *aniwatch_id* via the ajax endpoint.

        Requires a prior ``search_for_anime`` call: title/poster metadata
        is recovered from ``self.search_results``.  Returns a normalized
        anime dict, or ``None`` when the HTTP request fails.  Side
        effect: caches ``self.episodes_info`` for
        ``get_episode_streams``.
        """
        anime_result = {}
        for anime in self.search_results:
            if anime["id"] == aniwatch_id:
                anime_result = anime
                break
        anime_url = f"https://hianime.to/ajax/v2/episode/list/{aniwatch_id}"
        response = self.session.get(anime_url, timeout=10)
        if response.ok:
            response_json = response.json()
            # the endpoint returns rendered HTML inside a JSON envelope
            aniwatch_anime_page = response_json["html"]
            episodes_info_container_html = get_element_html_by_class(
                "ss-list", aniwatch_anime_page
            )
            episodes_info_html_list = get_elements_html_by_class(
                "ep-item", episodes_info_container_html
            )
            # keys: [ data-number: episode_number, data-id: episode_id, title: episode_title , href:episode_page_url]
            episodes_info_dicts = [
                extract_attributes(episode_dict)
                for episode_dict in episodes_info_html_list
            ]
            episodes = [episode["data-number"] for episode in episodes_info_dicts]
            self.episodes_info = [
                {
                    "id": episode["data-id"],
                    # strip the redundant "Episode N" text from the raw
                    # title, falling back to the anime title when empty
                    "title": (
                        (episode["title"] or "").replace(
                            f"Episode {episode['data-number']}", ""
                        )
                        or anime_result["title"]
                    )
                    + f"; Episode {episode['data-number']}",
                    "episode": episode["data-number"],
                }
                for episode in episodes_info_dicts
            ]
            return {
                "id": aniwatch_id,
                # the listing does not distinguish translation types, so the
                # same episode list is reported for dub, sub and raw
                "availableEpisodesDetail": {
                    "dub": episodes,
                    "sub": episodes,
                    "raw": episodes,
                },
                "poster": anime_result["poster"],
                "title": anime_result["title"],
                "episodes_info": self.episodes_info,
            }

    @debug_provider("ANIWATCH")
    def get_episode_streams(self, anime, episode, translation_type, *args):
        """Yield stream dicts for *episode* of *anime*.

        Requires a prior ``get_anime`` call (reads
        ``self.episodes_info``).  For each server listed on the episode
        page, resolves the embed link to its ``getSources`` endpoint and
        yields a dict with ``links``, ``subtitles``, ``server`` and
        ``episode_title`` keys.  Generator: yields nothing on any
        failure.
        """
        episode_details = [
            episode_details
            for episode_details in self.episodes_info
            if episode_details["episode"] == episode
        ]
        if not episode_details:
            return
        episode_details = episode_details[0]
        episode_url = f"https://hianime.to/ajax/v2/episode/servers?episodeId={episode_details['id']}"
        response = self.session.get(episode_url)
        if response.ok:
            response_json = response.json()
            episode_page_html = response_json["html"]
            servers_containers_html = get_elements_html_by_class(
                "ps__-list", episode_page_html
            )
            if not servers_containers_html:
                return
            # sub servers — first container; missing markup is tolerated
            try:
                servers_html_sub = get_elements_html_by_class(
                    "server-item", servers_containers_html[0]
                )
            except Exception:
                logger.warning("AniWatch: sub not found")
                servers_html_sub = None
            # dub servers — second container, may be absent
            try:
                servers_html_dub = get_elements_html_by_class(
                    "server-item", servers_containers_html[1]
                )
            except Exception:
                logger.warning("AniWatch: dub not found")
                servers_html_dub = None
            if translation_type == "dub":
                servers_html = servers_html_dub
            else:
                servers_html = servers_html_sub
            if not servers_html:
                return

            # Decorated helper: a failure on one server is logged (or raised
            # in debug mode) and returns None, so the loop below can simply
            # skip it and try the next server.
            @debug_provider("ANIWATCH")
            def _get_server(server_name, server_html):
                # keys: [ data-type: translation_type, data-id: embed_id, data-server-id: server_id ]
                servers_info = extract_attributes(server_html)
                embed_url = f"https://hianime.to/ajax/v2/episode/sources?id={servers_info['data-id']}"
                embed_response = self.session.get(embed_url)
                if embed_response.ok:
                    embed_json = embed_response.json()
                    raw_link_to_streams = embed_json["link"]
                    # parse the embed link into domain/type/episode/source id
                    match = LINK_TO_STREAMS_REGEX.match(raw_link_to_streams)
                    if not match:
                        return
                    provider_domain = match.group(1)
                    embed_type = match.group(2)
                    episode_number = match.group(3)
                    source_id = match.group(4)
                    # rebuild the ajax getSources URL from the parsed parts
                    link_to_streams = f"https://{provider_domain}/embed-{embed_type}/ajax/e-{episode_number}/getSources?id={source_id}"
                    link_to_streams_response = self.session.get(link_to_streams)
                    if link_to_streams_response.ok:
                        juicy_streams_json: "AniWatchStream" = (
                            link_to_streams_response.json()
                        )
                        return {
                            "headers": {},
                            "subtitles": [
                                {
                                    "url": track["file"],
                                    "language": track["label"],
                                }
                                for track in juicy_streams_json["tracks"]
                                if track["kind"] == "captions"
                            ],
                            "server": server_name,
                            "episode_title": episode_details["title"],
                            "links": give_random_quality(
                                [
                                    {"link": link["file"], "type": link["type"]}
                                    for link in juicy_streams_json["sources"]
                                ]
                            ),
                        }

            # cycle() pairs a server name with every server element even when
            # there are more elements than known names
            for server_name, server_html in zip(cycle(SERVERS_AVAILABLE), servers_html):
                if server := _get_server(server_name, server_html):
                    yield server