From d12be2500b7564c64a7ce5d26e17a50dc6d26256 Mon Sep 17 00:00:00 2001
From: Oshan96
Date: Wed, 25 Mar 2020 11:24:02 +0530
Subject: [PATCH] - Edited README

- Testing done for the executable version as well (animepahe is on server
  migration for 8 hours)
- Ready to be merged into master for v1.0.0
---
 README.md                                                 | 4 ++++
 anime_downloader/scrapers/animepahe/animepahe_scraper.py  | 1 +
 .../scrapers/animeultima/animeultima_scraper.py           | 9 +++++++--
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 48b41d2..8dcbf2e 100644
--- a/README.md
+++ b/README.md
@@ -42,6 +42,10 @@ Or, in your linux environment,
 sudo apt install ffmpeg
 ```
 
+#### Download failed with a weird error? Don't worry, it happens because these websites are protected by various security measures. Simply visit the website manually in your browser, then restart the anime downloader!
+
+#### Still not able to download? Go ahead and post your issue [here](https://github.com/Oshan96/Anime-Downloader/issues), and I will look into the error and provide the necessary fixes!
+
 ## Running the application
 
 Navigate to the extracted folder and open a cmd or powershell window from that folder and execute "anime-dl.exe" from command line.
diff --git a/anime_downloader/scrapers/animepahe/animepahe_scraper.py b/anime_downloader/scrapers/animepahe/animepahe_scraper.py
index 2dcf982..b3bf18c 100644
--- a/anime_downloader/scrapers/animepahe/animepahe_scraper.py
+++ b/anime_downloader/scrapers/animepahe/animepahe_scraper.py
@@ -113,6 +113,7 @@ def __get_cookie_and_response(self, episode):
     def __get_token(self, response):
         printer("INFO", "Collecting access token...", self.gui)
         page = response.text
+        # print(page)
         try:
             token = re.search("value\|(.*)\|([a-zA-Z])", page).group(1).split("|")[0]
             # print("TOKEN :", token)
diff --git a/anime_downloader/scrapers/animeultima/animeultima_scraper.py b/anime_downloader/scrapers/animeultima/animeultima_scraper.py
index 92eaebd..47d8a95 100644
--- a/anime_downloader/scrapers/animeultima/animeultima_scraper.py
+++ b/anime_downloader/scrapers/animeultima/animeultima_scraper.py
@@ -31,6 +31,7 @@ def get_anime_id(self):
         return content_data[-2]
 
     def get_start_and_end_page(self, anime_id):
+        # print("start end page")
         start_page = 0
         end_page = 0
 
@@ -54,6 +55,7 @@ def get_start_and_end_page(self, anime_id):
         return start_page, end_page
 
     def get_page_url(self, url):
+        # print("get page url")
         page = self.session.get(url).content
         soup_html = BeautifulSoup(page, "html.parser")
 
@@ -65,6 +67,7 @@ def get_page_url(self, url):
         return None
 
     def collect_episodes(self, anime_id, start_page, end_page):
+        # print("collect epis")
         base_url = "https://www1.animeultima.to/api/episodeList?animeId=" + anime_id + "&page="
 
         page_counter = start_page
@@ -105,6 +108,7 @@ def collect_episodes(self, anime_id, start_page, end_page):
             page_counter += 1
 
     def set_stream_url(self, episode):
+        # print("set stream")
         self.extractor.url = episode.page_url
         stream_url = self.extractor.extract_stream_link(self.resolution)
         print("Stream URL : " + stream_url)
@@ -118,11 +122,12 @@ def set_stream_urls(self):
             episode.dowload_url = stream_url
 
     def get_direct_links(self):
+        # print("direct links")
         anime_id = self.get_anime_id()
         start_page, end_page = self.get_start_and_end_page(anime_id)
-        print(anime_id)
-        print(start_page, end_page)
+        # print(anime_id)
+        # print(start_page, end_page)
 
         try:
             self.collect_episodes(anime_id, start_page, end_page)