Skip to content

Commit

Permalink
- Edited README
Browse files Browse the repository at this point in the history
- Testing done for executable version as well (animepahe is on server migration for 8 hours)
- Ready to be merged into master for v1.0.0
  • Loading branch information
Oshan96 committed Mar 25, 2020
1 parent e613aa5 commit d12be25
Show file tree
Hide file tree
Showing 3 changed files with 12 additions and 2 deletions.
4 changes: 4 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,10 @@ Or, in your linux environment,
sudo apt install ffmpeg
```

#### Did your download fail with a strange error? Don't worry — these websites are protected by various security measures. Simply visit the website manually in your browser, then restart the anime downloader!

#### Still unable to download? Please post your issue [here](https://github.com/Oshan96/Anime-Downloader/issues), and I will look into the error and provide the necessary fixes!

## Running the application
Navigate to the extracted folder and open a cmd or powershell window from that folder and execute "anime-dl.exe" from command line.

Expand Down
1 change: 1 addition & 0 deletions anime_downloader/scrapers/animepahe/animepahe_scraper.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,7 @@ def __get_cookie_and_response(self, episode):
def __get_token(self, response):
printer("INFO", "Collecting access token...", self.gui)
page = response.text
# print(page)
try:
token = re.search("value\|(.*)\|([a-zA-Z])", page).group(1).split("|")[0]
# print("TOKEN :", token)
Expand Down
9 changes: 7 additions & 2 deletions anime_downloader/scrapers/animeultima/animeultima_scraper.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ def get_anime_id(self):
return content_data[-2]

def get_start_and_end_page(self, anime_id):
# print("start end page")
start_page = 0
end_page = 0

Expand All @@ -54,6 +55,7 @@ def get_start_and_end_page(self, anime_id):
return start_page, end_page

def get_page_url(self, url):
# print("get page url")
page = self.session.get(url).content

soup_html = BeautifulSoup(page, "html.parser")
Expand All @@ -65,6 +67,7 @@ def get_page_url(self, url):
return None

def collect_episodes(self, anime_id, start_page, end_page):
# print("collect epis")
base_url = "https://www1.animeultima.to/api/episodeList?animeId=" + anime_id + "&page="
page_counter = start_page

Expand Down Expand Up @@ -105,6 +108,7 @@ def collect_episodes(self, anime_id, start_page, end_page):
page_counter += 1

def set_stream_url(self, episode):
# print("set stream")
self.extractor.url = episode.page_url
stream_url = self.extractor.extract_stream_link(self.resolution)
print("Stream URL : " + stream_url)
Expand All @@ -118,11 +122,12 @@ def set_stream_urls(self):
episode.dowload_url = stream_url

def get_direct_links(self):
# print("direct links")
anime_id = self.get_anime_id()
start_page, end_page = self.get_start_and_end_page(anime_id)

print(anime_id)
print(start_page, end_page)
# print(anime_id)
# print(start_page, end_page)

try:
self.collect_episodes(anime_id, start_page, end_page)
Expand Down

0 comments on commit d12be25

Please sign in to comment.