diff --git a/backend/controllers/webooks.py b/backend/controllers/webhooks.py
similarity index 99%
rename from backend/controllers/webooks.py
rename to backend/controllers/webhooks.py
index 68fa7dfb..6cc1c664 100644
--- a/backend/controllers/webooks.py
+++ b/backend/controllers/webhooks.py
@@ -2,7 +2,6 @@
 from datetime import datetime

 import pydantic
-import requests
 from fastapi import APIRouter, HTTPException, Request
 from program.indexers.trakt import get_imdbid_from_tmdb
 from program.settings.manager import settings_manager
diff --git a/backend/program/cache.py b/backend/program/cache.py
index 4e2ab9a8..12d566a1 100644
--- a/backend/program/cache.py
+++ b/backend/program/cache.py
@@ -9,7 +9,7 @@ class HashCache:
     """A class for caching hashes with additional metadata and a time-to-live (TTL) mechanism."""

-    def __init__(self, ttl: int = 420, maxsize: int = 2000):
+    def __init__(self, ttl: int = 900, maxsize: int = 10000):
         """
         Initializes the HashCache with a specified TTL and maximum size.

@@ -78,3 +78,6 @@ def clear_cache(self) -> None:
     def _get_cache_entry(self, infohash: str) -> dict:
         """Helper function to get a cache entry or create a new one if it doesn't exist."""
         return self.cache.get(infohash, {"blacklisted": False, "downloaded": False, "added_at": datetime.now()})
+
+
+hash_cache = HashCache()
diff --git a/backend/program/content/trakt.py b/backend/program/content/trakt.py
index 8d52648a..c88358d6 100644
--- a/backend/program/content/trakt.py
+++ b/backend/program/content/trakt.py
@@ -1,8 +1,8 @@
 """Trakt content module"""
 import time
 from types import SimpleNamespace
-from urllib.parse import urlparse
-
+from urllib.parse import urlencode, urlparse
+from utils.request import RateLimiter, post
 import regex

 from program.media.item import MediaItem, Movie, Show
@@ -27,8 +27,8 @@ def __init__(self):
         if not self.initialized:
             return
         self.next_run_time = 0
-        self.items_already_seen = set()  # Use a set for faster lookups
-        self.items_to_yield = {}
+        self.items_already_seen = set()
+        self.missing()
         logger.success("Trakt initialized!")

     def validate(self) -> bool:
@@ -51,79 +51,84 @@ def validate(self) -> bool:
             return False
         return True

+    def missing(self):
+        """Log missing items from Trakt"""
+        if not self.settings.watchlist:
+            logger.log("TRAKT", "No watchlist configured.")
+        if not self.settings.user_lists:
+            logger.log("TRAKT", "No user lists configured.")
+        if not self.settings.fetch_trending:
+            logger.log("TRAKT", "Trending fetching is disabled.")
+        if not self.settings.fetch_popular:
+            logger.log("TRAKT", "Popular fetching is disabled.")
+
     def run(self):
-        """Fetch media from Trakt and yield Movie or Show instances."""
+        """Fetch media from Trakt and yield Movie, Show, or MediaItem instances."""
         current_time = time.time()
         if current_time < self.next_run_time:
             return

         self.next_run_time = current_time + self.settings.update_interval

         watchlist_ids = self._get_watchlist(self.settings.watchlist)
-        collection_ids = self._get_collections(self.settings.collections)
         user_list_ids = self._get_list(self.settings.user_lists)
         trending_ids = self._get_trending_items() if self.settings.fetch_trending else []
         popular_ids = self._get_popular_items() if self.settings.fetch_popular else []

         # Combine all IMDb IDs and types
-        all_items = watchlist_ids + collection_ids + user_list_ids + trending_ids + popular_ids
-        all_ids = set(all_items)
-        logger.log("TRAKT", f"Fetched {len(all_ids)} unique IMDb IDs from Trakt.")
+        all_items = {
+            "Watchlist": watchlist_ids,
+            "User Lists": user_list_ids,
"Trending": trending_ids, + "Popular": popular_ids + } - for imdb_id, item_type in all_ids: - if imdb_id in self.items_already_seen or not imdb_id: - continue - self.items_already_seen.add(imdb_id) - - if item_type == "movie": - media_item = Movie({ - "imdb_id": imdb_id, - "requested_by": self.key - }) - else: - media_item = Show({ - "imdb_id": imdb_id, - "requested_by": self.key - }) - - yield media_item - self.items_to_yield.clear() - - def _get_watchlist(self, watchlist_items: list) -> list: + total_new_items = 0 + + for source, items in all_items.items(): + new_items_count = 0 + for imdb_id, item_type in items: + if imdb_id in self.items_already_seen or not imdb_id: + continue + self.items_already_seen.add(imdb_id) + new_items_count += 1 + + if source == "Popular": + media_item = MediaItem({ + "imdb_id": imdb_id, + "requested_by": self.key + }) + elif item_type == "movie": + media_item = Movie({ + "imdb_id": imdb_id, + "requested_by": self.key + }) + else: + media_item = Show({ + "imdb_id": imdb_id, + "requested_by": self.key + }) + + yield media_item + + if new_items_count > 0: + logger.log("TRAKT", f"New items fetched from {source}: {new_items_count}") + total_new_items += new_items_count + if total_new_items > 0: + logger.log("TRAKT", f"Total new items fetched: {total_new_items}") + + def _get_watchlist(self, watchlist_users: list) -> list: """Get IMDb IDs from Trakt watchlist""" - if not watchlist_items: - logger.warning("No watchlist items configured.") + if not watchlist_users: return [] imdb_ids = [] - for url in watchlist_items: - match = regex.match(r'https://trakt.tv/users/([^/]+)/watchlist', url) - if not match: - logger.error(f"Invalid watchlist URL: {url}") - continue - user = match.group(1) + for user in watchlist_users: items = get_watchlist_items(self.api_url, self.headers, user) imdb_ids.extend(self._extract_imdb_ids(items)) return imdb_ids - def _get_collections(self, collection_items: list) -> list: - """Get IMDb IDs from Trakt collections""" - if not collection_items: - logger.warning("No collection items configured.") - return [] - imdb_ids = [] - for url in collection_items: - match = regex.match(r'https://trakt.tv/users/([^/]+)/collection', url) - if not match: - logger.error(f"Invalid collection URL: {url}") - continue - user = match.group(1) - items = get_user_list(self.api_url, self.headers, user, "collection") - imdb_ids.extend(self._extract_imdb_ids(items)) - return imdb_ids - def _get_list(self, list_items: list) -> list: """Get IMDb IDs from Trakt user list""" - if not list_items: - logger.warning("No user list items configured.") + if not list_items or not any(list_items): return [] imdb_ids = [] for url in list_items: @@ -134,7 +139,15 @@ def _get_list(self, list_items: list) -> list: user, list_name = match.groups() list_name = urlparse(url).path.split('/')[-1] items = get_user_list(self.api_url, self.headers, user, list_name) - imdb_ids.extend(self._extract_imdb_ids(items)) + for item in items: + if hasattr(item, "movie"): + imdb_id = getattr(item.movie.ids, "imdb", None) + if imdb_id: + imdb_ids.append((imdb_id, "movie")) + elif hasattr(item, "show"): + imdb_id = getattr(item.show.ids, "imdb", None) + if imdb_id: + imdb_ids.append((imdb_id, "show")) return imdb_ids def _get_trending_items(self) -> list: @@ -147,37 +160,81 @@ def _get_popular_items(self) -> list: """Get IMDb IDs from Trakt popular items""" popular_movies = get_popular_items(self.api_url, self.headers, "movies", self.settings.popular_count) popular_shows = 
-        return self._extract_imdb_ids(popular_movies + popular_shows)
+        return self._extract_imdb_ids_with_none_type(popular_movies + popular_shows)

     def _extract_imdb_ids(self, items: list) -> list:
         """Extract IMDb IDs and types from a list of items"""
         imdb_ids = []
         for item in items:
-            show = getattr(item, "show", None)
-            if show:
-                ids = getattr(show, "ids", None)
+            if hasattr(item, "show"):
+                ids = getattr(item.show, "ids", None)
                 if ids:
                     imdb_id = getattr(ids, "imdb", None)
                     if imdb_id:
                         imdb_ids.append((imdb_id, "show"))
-            else:
-                ids = getattr(item, "ids", None)
+            elif hasattr(item, "movie"):
+                ids = getattr(item.movie, "ids", None)
                 if ids:
                     imdb_id = getattr(ids, "imdb", None)
                     if imdb_id:
                         imdb_ids.append((imdb_id, "movie"))
         return imdb_ids

+    def _extract_imdb_ids_with_none_type(self, items: list) -> list:
+        """Extract IMDb IDs from a list of items, returning None for type"""
+        imdb_ids = []
+        for item in items:
+            ids = getattr(item, "ids", None)
+            if ids:
+                imdb_id = getattr(ids, "imdb", None)
+                if imdb_id:
+                    imdb_ids.append((imdb_id, None))
+        return imdb_ids
+
+    def perform_oauth_flow(self) -> str:
+        """Initiate the OAuth flow and return the authorization URL."""
+        params = {
+            "response_type": "code",
+            "client_id": self.settings.oauth_client_id,
+            "redirect_uri": self.settings.oauth_redirect_uri,
+        }
+        auth_url = f"{self.api_url}/oauth/authorize?{urlencode(params)}"
+        return auth_url
+
+    def handle_oauth_callback(self, code: str) -> bool:
+        """Handle the OAuth callback and exchange the code for an access token."""
+        token_url = f"{self.api_url}/oauth/token"
+        payload = {
+            "code": code,
+            "client_id": self.settings.oauth_client_id,
+            "client_secret": self.settings.oauth_client_secret,
+            "redirect_uri": self.settings.oauth_redirect_uri,
+            "grant_type": "authorization_code",
+        }
+        response = post(token_url, data=payload, additional_headers=self.headers)
+        if response.is_ok:
+            token_data = response.data
+            self.settings.access_token = token_data.get("access_token")
+            self.settings.refresh_token = token_data.get("refresh_token")
+            settings_manager.save()  # Save the tokens to settings
+            return True
+        else:
+            logger.error(f"Failed to obtain OAuth token: {response.status_code}")
+            return False

 ## API functions for Trakt

+rate_limiter = RateLimiter(max_calls=1000, period=300)
+
 def _fetch_data(url, headers, params):
-    """Fetch paginated data from Trakt API."""
+    """Fetch paginated data from Trakt API with rate limiting."""
     all_data = []
     page = 1
+
     while True:
         try:
-            response = get(url, params={**params, "page": page}, additional_headers=headers)
+            with rate_limiter:
+                response = get(url, params={**params, "page": page}, additional_headers=headers)
             if response.is_ok:
                 data = response.data
                 if not data:
@@ -186,6 +243,9 @@ def _fetch_data(url, headers, params):
                 if len(data) <= params["limit"]:
                     break
                 page += 1
+            elif response.status_code == 429:
+                logger.warning("Rate limit exceeded. Retrying after rate limit period.")
Retrying after rate limit period.") + rate_limiter.limit_hit() else: logger.error(f"Failed to fetch data: {response.status_code}") break @@ -209,11 +269,6 @@ def get_liked_lists(api_url, headers, limit=10): url = f"{api_url}/users/likes/lists" return _fetch_data(url, headers, {"limit": limit}) -def get_recommendations(api_url, headers, media_type, limit=10): - """Get recommendations from Trakt with pagination support.""" - url = f"{api_url}/recommendations/{media_type}" - return _fetch_data(url, headers, {"limit": limit}) - def get_trending_items(api_url, headers, media_type, limit=10): """Get trending items from Trakt with pagination support.""" url = f"{api_url}/{media_type}/trending" diff --git a/backend/program/downloaders/realdebrid.py b/backend/program/downloaders/realdebrid.py index ae277628..45570847 100644 --- a/backend/program/downloaders/realdebrid.py +++ b/backend/program/downloaders/realdebrid.py @@ -176,9 +176,9 @@ def _is_wanted_movie(self, container: dict, item: Movie) -> bool: ) # lets create a regex pattern to remove deleted scenes and samples and trailers from the filenames list - unwanted_regex = regex.compile(r"\b(?:deleted.scene|sample|trailer|featurette)\b", regex.IGNORECASE) - filenames = [file for file in filenames if not unwanted_regex.search(file["filename"])] - + # unwanted_regex = regex.compile(r"\b(?:deleted.scene|sample|trailer|featurette)\b", regex.IGNORECASE) + # filenames = [file for file in filenames if not unwanted_regex.search(file["filename"])] + if not filenames: return False diff --git a/backend/program/libraries/plex.py b/backend/program/libraries/plex.py index 600e2551..3fdd042a 100644 --- a/backend/program/libraries/plex.py +++ b/backend/program/libraries/plex.py @@ -111,16 +111,20 @@ def run(self): # Gather all results for future in concurrent.futures.as_completed(futures): - chunk_results = future.result() - items.extend(chunk_results) - with self.lock: - self.last_fetch_times[section.key] = datetime.now() - processed_sections.add(section.key) + try: + chunk_results = future.result(timeout=2) # Add timeout to speed up shutdown + items.extend(chunk_results) + with self.lock: + self.last_fetch_times[section.key] = datetime.now() + processed_sections.add(section.key) + except concurrent.futures.TimeoutError: + logger.warning("Timeout while waiting for chunk processing result.") + except Exception as e: + logger.exception(f"Failed to get chunk result: {e}") if not processed_sections: - logger.error("Failed to process any sections. 
Check your library_path settings.") - - logger.log("PLEX", f"Processed {len(items)} items.") + return [] + return items except Exception as e: logger.exception(f"Unexpected error occurred: {e}") diff --git a/backend/program/media/container.py b/backend/program/media/container.py index ab27018f..feb8fe63 100644 --- a/backend/program/media/container.py +++ b/backend/program/media/container.py @@ -116,7 +116,7 @@ def upsert(self, item: MediaItem) -> None: finally: self.lock.release_write() - def _merge_items(self, existing_item, new_item): + def _merge_items(self, existing_item: MediaItem, new_item: MediaItem) -> None: """Merge new item data into existing item without losing existing state.""" if existing_item.state == States.Completed and new_item.state != States.Completed: return @@ -215,7 +215,7 @@ def get_incomplete_items(self) -> Dict[ItemId, MediaItem]: finally: self.lock.release_read() - def save(self, filename): + def save(self, filename: str) -> None: if not self._items: return @@ -241,12 +241,12 @@ def save(self, filename): except OSError as remove_error: logger.error(f"Failed to remove temporary file: {remove_error}") - def load(self, filename): + def load(self, filename: str) -> None: try: with open(filename, "rb") as file: from_disk: MediaItemContainer = dill.load(file) # noqa: S301 except FileNotFoundError: - logger.error(f"Cannot find cached media data at {filename}") + logger.error(f"Unable to find the media library file. Starting fresh.") return except (EOFError, dill.UnpicklingError) as e: logger.error(f"Failed to unpickle media data: {e}. Starting fresh.") diff --git a/backend/program/media/item.py b/backend/program/media/item.py index 0102f0c5..8b7e2081 100644 --- a/backend/program/media/item.py +++ b/backend/program/media/item.py @@ -309,10 +309,11 @@ def fill_in_missing_children(self, other: Self): def add_season(self, season): """Add season to show""" - self.seasons.append(season) - season.parent = self - season.item_id.parent_id = self.item_id - self.seasons = sorted(self.seasons, key=lambda s: s.number) + if season.number not in [s.number for s in self.seasons]: + self.seasons.append(season) + season.parent = self + season.item_id.parent_id = self.item_id + self.seasons = sorted(self.seasons, key=lambda s: s.number) class Season(MediaItem): diff --git a/backend/program/program.py b/backend/program/program.py index d77782c8..5c491fb6 100644 --- a/backend/program/program.py +++ b/backend/program/program.py @@ -21,7 +21,7 @@ from utils import data_dir_path from utils.logger import logger, scrub_logs -from .cache import HashCache +from .cache import hash_cache from .pickly import Pickly from .state_transition import process_event from .symlink import Symlinker @@ -49,15 +49,14 @@ def initialize_services(self): TraktContent: TraktContent(), } self.indexing_services = {TraktIndexer: TraktIndexer()} - self.hash_cache = HashCache(420, 10000) self.processing_services = { - Scraping: Scraping(self.hash_cache), + Scraping: Scraping(hash_cache), Symlinker: Symlinker(), PlexUpdater: PlexUpdater(), } self.downloader_services = { - Debrid: Debrid(self.hash_cache), - TorBoxDownloader: TorBoxDownloader(self.hash_cache), + Debrid: Debrid(hash_cache), + TorBoxDownloader: TorBoxDownloader(hash_cache), } # Depends on Symlinker having created the file structure so needs # to run after it diff --git a/backend/program/scrapers/torbox.py b/backend/program/scrapers/torbox.py index 5b9183f5..a260da25 100644 --- a/backend/program/scrapers/torbox.py +++ b/backend/program/scrapers/torbox.py @@ 
@@ -3,8 +3,8 @@
 from program.media.item import Episode, MediaItem, Movie, Season, Show
 from program.settings.manager import settings_manager
 from program.settings.versions import models
-from requests import ConnectTimeout, ReadTimeout, RequestException
-from requests.exceptions import RetryError
+from requests import RequestException
+from requests.exceptions import RetryError, ReadTimeout, ConnectTimeout
 from RTN import RTN, Torrent, sort_torrents
 from RTN.exceptions import GarbageTorrent
 from utils.logger import logger
@@ -35,7 +35,7 @@ def validate(self) -> bool:
             return False
         try:
-            response = ping(f"{self.base_url}/torrents/imdb:tt0080684")
+            response = ping(f"{self.base_url}/torrents/imdb:tt0944947?metadata=false&season=1&episode=1", timeout=60)
             return response.ok
         except Exception as e:
             logger.exception(f"Error validating TorBox Scraper: {e}")
@@ -50,30 +50,32 @@ def run(self, item: MediaItem) -> Generator[MediaItem, None, None]:
         try:
             yield self.scrape(item)
-        except RateLimitExceeded:
+        except Exception as e:
             self.minute_limiter.limit_hit()
             self.second_limiter.limit_hit()
+            self.handle_exception(e, item)
+        yield item
+
+    def handle_exception(self, e: Exception, item: MediaItem) -> None:
+        """Handle exceptions during scraping"""
+        if isinstance(e, RateLimitExceeded):
             logger.log("NOT_FOUND", f"TorBox is caching request for {item.log_string}, will retry later")
-        except ConnectTimeout:
-            self.minute_limiter.limit_hit()
-            self.second_limiter.limit_hit()
+        elif isinstance(e, ConnectTimeout):
             logger.log("NOT_FOUND", f"TorBox is caching request for {item.log_string}, will retry later")
-        except ReadTimeout:
-            self.minute_limiter.limit_hit()
+        elif isinstance(e, ReadTimeout):
             logger.warning(f"TorBox read timeout for item: {item.log_string}")
-        except RetryError:
-            self.minute_limiter.limit_hit()
+        elif isinstance(e, RetryError):
             logger.warning(f"TorBox retry error for item: {item.log_string}")
-        except RequestException as e:
-            self.minute_limiter.limit_hit()
-            if e.response.status_code == 418:
+        elif isinstance(e, TimeoutError):
+            logger.warning(f"TorBox timeout error for item: {item.log_string}")
+        elif isinstance(e, RequestException):
+            if e.response and e.response.status_code == 418:
                 logger.log("NOT_FOUND", f"TorBox has no metadata for item: {item.log_string}, unable to scrape")
-            else:
-                pass  # Hide other TorBox connection errors
-        except Exception as e:
-            self.minute_limiter.limit_hit()
-            logger.exception(f"TorBox exception thrown: {e}")
-        yield item
+            elif e.response and e.response.status_code == 500:
+                logger.log("NOT_FOUND", f"TorBox is caching request for {item.log_string}, will retry later")
+            else:
+                logger.error(f"TorBox exception thrown: {e}")
+
     def scrape(self, item: MediaItem) -> MediaItem:
         """Scrape the given item"""
diff --git a/backend/program/settings/models.py b/backend/program/settings/models.py
index 060872b8..1934e03b 100644
--- a/backend/program/settings/models.py
+++ b/backend/program/settings/models.py
@@ -97,7 +97,6 @@ class TraktModel(Updatable):
     enabled: bool = False
     api_key: str = ""
     watchlist: list[str] = []
-    collections: list[str] = []
     user_lists: list[str] = []
     fetch_trending: bool = False
     trending_count: int = 10
@@ -106,6 +105,15 @@
     update_interval: int = 300

+
+class TraktOauthModel(BaseModel):
+    # This is for app settings to handle oauth with trakt
+    oauth_client_id: str = ""
+    oauth_client_secret: str = ""
+    oauth_redirect_uri: str = ""
+    access_token: str = ""
+    refresh_token: str = ""
+

 class ContentModel(Observable):
     overseerr: OverseerrModel = OverseerrModel()
     plex_watchlist: PlexWatchlistModel = PlexWatchlistModel()
diff --git a/backend/program/symlink.py b/backend/program/symlink.py
index c64170a8..83375353 100644
--- a/backend/program/symlink.py
+++ b/backend/program/symlink.py
@@ -8,6 +8,7 @@
 from utils.logger import logger
 from watchdog.events import FileSystemEventHandler
 from watchdog.observers import Observer
+from .cache import hash_cache


 class DeleteHandler(FileSystemEventHandler):
@@ -51,15 +52,11 @@ def __init__(self):
     def validate(self):
         """Validate paths and create the initial folders."""
         library_path = self.settings.library_path
-        if (
-            not self.rclone_path
-            or not library_path
-            or self.rclone_path == Path(".")
-            or library_path == Path(".")
-        ):
-            logger.error(
-                "rclone_path or library_path not provided, is empty, or is set to the current directory."
-            )
+        if not self.rclone_path or not library_path:
+            logger.error("rclone_path or library_path not provided.")
+            return False
+        if self.rclone_path == Path(".") or library_path == Path("."):
+            logger.error("rclone_path or library_path is set to the current directory.")
             return False
         if not self.rclone_path.is_absolute():
             logger.error(f"rclone_path is not an absolute path: {self.rclone_path}")
@@ -67,20 +64,13 @@
         if not library_path.is_absolute():
             logger.error(f"library_path is not an absolute path: {library_path}")
             return False
-        try:
-            if not self.create_initial_folders():
-                logger.error(
-                    "Failed to create initial library folders in your library_path."
-                )
-                return False
-            return True
-        except FileNotFoundError as e:
-            logger.error(f"Path not found: {e}")
-        except PermissionError as e:
-            logger.error(f"Permission denied when accessing path: {e}")
-        except OSError as e:
-            logger.error(f"OS error when validating paths: {e}")
-        return False
+        if not self.rclone_path.exists():
+            logger.error(f"rclone_path does not exist: {self.rclone_path}")
+            return False
+        if not library_path.exists():
+            logger.error(f"library_path does not exist: {library_path}")
+            return False
+        return self.create_initial_folders()

     def start_monitor(self):
         """Starts monitoring the library path for symlink deletions."""
@@ -102,8 +92,11 @@ def stop_monitor(self):
     def on_symlink_deleted(self, symlink_path):
         """Handle a symlink deletion event."""
         src = Path(symlink_path)
-        dst = Path(symlink_path).resolve()
-        logger.log("FILES", f"Symlink deleted: {src} -> {dst}")
+        if src.is_symlink():
+            dst = src.resolve()
+            logger.log("FILES", f"Symlink deleted: {src} -> {dst}")
+        else:
+            logger.log("FILES", f"Symlink deleted: {src} (target unknown)")
         # TODO: Implement logic to handle deletion..

     def create_initial_folders(self):
@@ -148,27 +141,34 @@ def run(self, item):
     @staticmethod
     def should_submit(item) -> bool:
         """Check if the item should be submitted for symlink creation."""
-        if Symlinker.file_check(item):
-            return True
+        if isinstance(item, (Movie, Episode)):
+            if Symlinker.file_check(item):
+                return True

         # If we've tried 3 times to symlink the file, give up
         if item.symlinked_times >= 3:
             if isinstance(item, (Movie, Episode)):
                 item.set("file", None)
                 item.set("folder", None)
-                item.set("streams", {})  # making sure we rescrape
+                item.set("streams", {})  # Ensure rescraping
                 item.set("symlinked_times", 0)
+                infohash = item.active_stream.get("hash")
+                if infohash:
+                    hash_cache.blacklist(infohash)
+                else:
+                    logger.error(f"Failed to retrieve hash for {item.log_string}, unable to blacklist")
             return False

         # If the file doesn't exist, we should wait a bit and try again
-        logger.debug(f"Sleeping for 10 seconds before checking if file exists again for {item.log_string}")
+        logger.log("NOT_FOUND", f"Retrying file check in 10 seconds: {item.log_string}")
         time.sleep(10)
         return True

     @staticmethod
     def file_check(item: MediaItem) -> bool:
         """Check if the file exists in the rclone path."""
-        if not item.file:
+        if not item.file or item.file == "None.mkv":
+            logger.error(f"Invalid file for {item.log_string}: {item.file}. Needs to be rescraped.")
             return False

         try:
@@ -185,16 +185,30 @@ def file_check(item: MediaItem) -> bool:
         alt_file_path = rclone_path / item.alternative_folder / item.file if item.alternative_folder else None
         thd_file_path = rclone_path / item.file / item.file

-        if std_file_path and std_file_path.exists():
-            return True
-        elif alt_file_path and alt_file_path.exists():
-            item.set("folder", item.alternative_folder)
-            return True
-        elif thd_file_path.exists():
-            item.set("folder", item.file)
-            return True
-
-        logger.log("FILES", f"File not found for {item.log_string} with file: {item.file}")
+        for attempt in range(2):
+            if std_file_path and std_file_path.exists():
+                return True
+            elif alt_file_path and alt_file_path.exists():
+                item.set("folder", item.alternative_folder)
+                return True
+            elif thd_file_path.exists():
+                item.set("folder", item.file)
+                return True
+
+            if attempt < 1:
+                logger.log("FILES", f"File not found for {item.log_string} with file: {item.file}. Retrying in 10 seconds...")
+                time.sleep(10)
+            else:
+                logger.log("FILES", f"File not found for {item.log_string} after 1 attempt. Searching entire rclone_path...")
+
+        # On the 2nd attempt, search the entire rclone_path
+        for file_path in rclone_path.rglob(item.file):
+            if file_path.exists():
+                item.set("folder", str(file_path.parent.relative_to(rclone_path)))
+                logger.log("FILES", f"File found for {item.log_string} by searching rclone_path: {file_path}")
+                return True
+
+        logger.log("FILES", f"File not found for {item.log_string} with file: {item.file} after searching rclone_path.")
         return False

     def _determine_file_name(self, item) -> str | None:
@@ -233,6 +247,10 @@ def _symlink(self, item) -> bool:

     def _symlink_single(self, item) -> bool:
         """Create a symlink for a single media item."""
+        if not item.file or item.file == "None.mkv":
+            logger.error(f"Cannot create symlink for {item.log_string}: Invalid file {item.file}. Needs to be rescraped.")
Needs to be rescraped.") + return False + extension = os.path.splitext(item.file)[1][1:] symlink_filename = f"{self._determine_file_name(item)}.{extension}" destination = self._create_item_folders(item, symlink_filename) @@ -306,3 +324,4 @@ def _create_item_folders(self, item, filename) -> str: destination_path = os.path.join(season_path, filename.replace("/", "-")) item.set("update_folder", os.path.join(season_path)) return destination_path + diff --git a/backend/utils/request.py b/backend/utils/request.py index 98822153..5a237f4a 100644 --- a/backend/utils/request.py +++ b/backend/utils/request.py @@ -86,8 +86,6 @@ def _make_request( response = session.request( method, url, headers=headers, data=data, params=params, timeout=timeout ) - except requests.ReadTimeout: - response = _handle_request_exception() except Exception: response = _handle_request_exception() finally: