diff --git a/.github/workflows/docker-build-dev.yml b/.github/workflows/docker-build-dev.yml
new file mode 100644
index 00000000..b130d125
--- /dev/null
+++ b/.github/workflows/docker-build-dev.yml
@@ -0,0 +1,57 @@
+name: Docker dev branch Build and Push
+
+on:
+  push:
+    branches:
+      - dev
+  workflow_dispatch:
+
+jobs:
+  build-and-push:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      packages: write
+      security-events: write
+
+    steps:
+      - name: Docker Setup QEMU
+        uses: docker/setup-qemu-action@v3
+        id: qemu
+        with:
+          platforms: amd64,arm64
+
+      - name: Log into ghcr.io registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: set lower case owner name
+        run: |
+          echo "GITHUB_OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
+        env:
+          OWNER: '${{ github.repository_owner }}'
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Log in to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKER_HUB_USERNAME }}
+          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v5
+        with:
+          # context: .
+          file: ./Dockerfile
+          platforms: linux/amd64,linux/arm64
+          push: true
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          tags: |
+            ghcr.io/${{ env.GITHUB_OWNER_LC }}/${{ github.event.repository.name }}:dev
+            docker.io/spoked/iceberg:dev
diff --git a/Dockerfile b/Dockerfile
index 91257930..48acfa88 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -14,12 +14,14 @@ LABEL name="Iceberg" \
       description="Iceberg Debrid Downloader" \
       url="https://github.com/dreulavelle/iceberg"
 
-RUN apk --update add python3 py3-pip bash shadow vim nano rclone && \
+RUN apk --update add --no-cache python3 py3-pip bash shadow && \
     rm -rf /var/cache/apk/*
 
 WORKDIR /iceberg
 
-ENV ORIGIN http://localhost:3000
-ARG ORIGIN=http://localhost:3000
+
+ARG ORIGIN
+ENV ORIGIN=${ORIGIN:-http://localhost:3000}
+
 EXPOSE 3000 8080
 
 # Frontend
diff --git a/README.md b/README.md
index 022b8f1e..667c945d 100644
--- a/README.md
+++ b/README.md
@@ -62,7 +62,7 @@ version: "3.8"
 services:
   iceberg:
     image: spoked/iceberg:latest
-    container_name: Iceberg
+    container_name: iceberg
     restart: unless-stopped
    environment:
       PUID: "1000"
@@ -72,6 +72,7 @@ services:
       - "3000:3000"
     volumes:
       - ./data:/iceberg/data
+      - /mnt:/mnt
 ```
 
 Then run `docker compose up -d` to start the container in the background. You can then access the web interface at `http://localhost:3000` or whatever port and origin you set in the `docker-compose.yml` file.
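Note on the `ORIGIN` change: `ARG ORIGIN` plus `ENV ORIGIN=${ORIGIN:-http://localhost:3000}` means a build argument wins when supplied, and the literal is only a fallback, instead of the old hard-coded `ENV`. A minimal Python sketch of the same precedence (only the variable name is taken from the Dockerfile):

```python
import os

# Same precedence as ENV ORIGIN=${ORIGIN:-http://localhost:3000}:
# an explicitly provided ORIGIN wins; otherwise fall back to the literal.
origin = os.environ.get("ORIGIN") or "http://localhost:3000"
print(origin)
```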
diff --git a/backend/controllers/items.py b/backend/controllers/items.py
index 560d1a35..44bc2cd8 100644
--- a/backend/controllers/items.py
+++ b/backend/controllers/items.py
@@ -31,7 +31,7 @@ async def get_extended_item_info(request: Request, item_id: str):
         raise HTTPException(status_code=404, detail="Item not found")
     return {
         "success": True,
-        "item": item.to_extended_dict(),  # Assuming this method exists
+        "item": item.to_extended_dict(),
     }
 
@@ -42,3 +42,14 @@ async def remove_item(request: Request, item: str):
         "success": True,
         "message": f"Removed {item}",
     }
+
+
+@router.get("/imdb/{imdb_id}")
+async def get_imdb_info(request: Request, imdb_id: str):
+    item = request.app.program.media_items.get_item_by_imdb_id(imdb_id)
+    if item is None:
+        raise HTTPException(status_code=404, detail="Item not found")
+    return {
+        "success": True,
+        "item": item.to_extended_dict()
+    }
\ No newline at end of file
diff --git a/backend/program/__init__.py b/backend/program/__init__.py
index ae415b63..4a4ae483 100644
--- a/backend/program/__init__.py
+++ b/backend/program/__init__.py
@@ -58,9 +58,13 @@ def validate(self):
         return all(service.initialized for service in self.core_manager.services)
 
     def stop(self):
-        for service in self.core_manager.services:
-            if getattr(service, "running", False):
-                service.stop()
-        self.pickly.stop()
-        settings.save()
-        self.running = False
+        try:
+            for service in self.core_manager.services:
+                if getattr(service, "running", False):
+                    service.stop()
+            self.pickly.stop()
+            settings.save()
+            self.running = False
+        except Exception as e:
+            logger.error("Iceberg stopping with exception: %s", e)
\ No newline at end of file
diff --git a/backend/program/content/listrr.py b/backend/program/content/listrr.py
index b60b269a..6104c128 100644
--- a/backend/program/content/listrr.py
+++ b/backend/program/content/listrr.py
@@ -35,17 +35,32 @@ def __init__(self, media_items: MediaItemContainer):
         logger.info("Listrr initialized!")
 
     def validate_settings(self) -> bool:
+        """Validate Listrr settings."""
         if not self.settings.enabled:
             logger.debug("Listrr is set to disabled.")
             return False
         if self.settings.api_key == "" or len(self.settings.api_key) != 64:
-            logger.error("Listrr api key is not set.")
+            logger.error("Listrr api key is not set or invalid.")
+            return False
+        valid_list_found = False
+        for list_name, content_list in [('movie_lists', self.settings.movie_lists),
+                                        ('show_lists', self.settings.show_lists)]:
+            if content_list is None or not any(content_list):
+                continue
+            for item in content_list:
+                if item == "" or len(item) != 24:
+                    return False
+                valid_list_found = True
+        if not valid_list_found:
+            logger.error("Both Movie and Show lists are empty or not set.")
             return False
         try:
             response = ping("https://listrr.pro/", additional_headers=self.headers)
+            if not response.ok:
+                logger.error(f"Listrr ping failed - Status Code: {response.status_code}, Reason: {response.reason}")
             return response.ok
-        except Exception:
-            logger.error("Listrr url is not reachable.")
+        except Exception as e:
+            logger.error(f"Listrr ping exception: {e}")
             return False
 
     def run(self):
@@ -56,7 +71,8 @@ def run(self):
         movie_items = self._get_items_from_Listrr("Movies", self.settings.movie_lists)
         show_items = self._get_items_from_Listrr("Shows", self.settings.show_lists)
         items = list(set(movie_items + show_items))
-        container = self.updater.create_items(items)
+        new_items = [item for item in items if item not in self.media_items]
+        container = self.updater.create_items(new_items)
         for item in container:
             item.set("requested_by", "Listrr")
         added_items = self.media_items.extend(container)
@@ -87,11 +103,11 @@ def _get_items_from_Listrr(self, content_type, content_lists):
                     imdb_id = item.imDbId
                     if imdb_id:
                         unique_ids.add(imdb_id)
-                elif content_type == "Shows" and item.tvDbId:
-                    imdb_id = get_imdbid_from_tvdb(item.tvDbId)
-                    if imdb_id:
-                        unique_ids.add(imdb_id)
-                elif content_type == "Movies" and item.tmDbId:
+                # elif content_type == "Shows" and item.tvDbId:
+                #     imdb_id = get_imdbid_from_tvdb(item.tvDbId)
+                #     if imdb_id:
+                #         unique_ids.add(imdb_id)
+                if not imdb_id and content_type == "Movies" and item.tmDbId:
                     imdb_id = get_imdbid_from_tmdb(item.tmDbId)
                     if imdb_id:
                         unique_ids.add(imdb_id)
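The new Listrr validation only checks that each list ID has length 24. If Listrr list IDs are in fact 24-character hex strings (an assumption, not confirmed by this diff), a stricter check is cheap and catches pasted whitespace or truncated IDs early — a hypothetical helper:

```python
import re

# Hypothetical tightening of the len(item) == 24 check above; assumes the
# IDs are 24-char hex, which this diff does not confirm.
LIST_ID = re.compile(r"^[0-9a-f]{24}$", re.IGNORECASE)

def valid_list_ids(content_list) -> bool:
    """Return True if every entry looks like a 24-char hex list ID."""
    return bool(content_list) and all(
        isinstance(i, str) and LIST_ID.match(i) for i in content_list
    )

assert valid_list_ids(["5fc1a9f0de4b9b001c123456"])
assert not valid_list_ids(["not-an-id"])
```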
item.set("requested_by", "Listrr") added_items = self.media_items.extend(container) @@ -87,11 +103,11 @@ def _get_items_from_Listrr(self, content_type, content_lists): imdb_id = item.imDbId if imdb_id: unique_ids.add(imdb_id) - elif content_type == "Shows" and item.tvDbId: - imdb_id = get_imdbid_from_tvdb(item.tvDbId) - if imdb_id: - unique_ids.add(imdb_id) - elif content_type == "Movies" and item.tmDbId: + # elif content_type == "Shows" and item.tvDbId: + # imdb_id = get_imdbid_from_tvdb(item.tvDbId) + # if imdb_id: + # unique_ids.add(imdb_id) + if not imdb_id and content_type == "Movies" and item.tmDbId: imdb_id = get_imdbid_from_tmdb(item.tmDbId) if imdb_id: unique_ids.add(imdb_id) diff --git a/backend/program/content/overseerr.py b/backend/program/content/overseerr.py index 5b011ac5..4ba3924e 100644 --- a/backend/program/content/overseerr.py +++ b/backend/program/content/overseerr.py @@ -42,6 +42,11 @@ def validate_settings(self) -> bool: additional_headers=self.headers, timeout=15, ) + if response.status_code >= 201: + logger.error( + f"Overseerr ping failed - Status Code: {response.status_code}, Reason: {response.reason}" + ) + return False return response.ok except Exception: logger.error("Overseerr url is not reachable.") @@ -78,7 +83,6 @@ def _get_items_from_overseerr(self, amount: int): ids.append(imdb_id) else: ids.append(item.media.imdbId) - return ids def get_imdb_id(self, overseerr_item): @@ -97,31 +101,26 @@ def get_imdb_id(self, overseerr_item): self.settings.url + f"/api/v1/{overseerr_item.mediaType}/{external_id}?language=en", additional_headers=self.headers, ) - if response.is_ok and response.data.externalIds: - imdb_id = response.data.externalIds.imdbId - if imdb_id: - return imdb_id - elif not imdb_id and response.data.externalIds.tvdbId: - imdb_id = get_imdbid_from_tvdb(response.data.externalIds.tvdbId) - if imdb_id: - logger.debug( - "Could not find imdbId for %s but found it from tvdbId %s", - overseerr_item.title, - response.data.externalIds.tvdbId - ) - return imdb_id - elif not imdb_id and response.data.externalIds.tmdbId: - imdb_id = get_imdbid_from_tmdb(response.data.externalIds.tmdbId) - if imdb_id: - logger.debug( - "Could not find imdbId for %s but found it from tmdbId %s", - overseerr_item.title, - response.data.externalIds.tmdbId - ) - return imdb_id - self.not_found_ids.append(f"{id_extension}{external_id}") - title = getattr(response.data, "title", None) or getattr( - response.data, "originalName", None - ) - logger.debug("Could not get imdbId for %s, or match with external id", title) + if not response.is_ok or not hasattr(response.data, "externalIds"): + logger.debug(f"Failed to fetch or no externalIds for {id_extension}{external_id}") + return None + + title = getattr(response.data, "title", None) or getattr(response.data, "originalName", None) + imdb_id = getattr(response.data.externalIds, 'imdbId', None) + if imdb_id: + return imdb_id + + # Try alternate IDs if IMDb ID is not available + # alternate_ids = [('tvdbId', get_imdbid_from_tvdb), ('tmdbId', get_imdbid_from_tmdb)] + alternate_ids = [('tmdbId', get_imdbid_from_tmdb)] + for id_attr, fetcher in alternate_ids: + external_id_value = getattr(response.data.externalIds, id_attr, None) + if external_id_value: + new_imdb_id = fetcher(external_id_value) + if new_imdb_id: + logger.debug(f"Found imdbId for {title} from {id_attr}: {external_id_value}") + return new_imdb_id + + self.not_found_ids.append(f"{id_extension}{external_id}") + logger.debug(f"Could not get imdbId for {title}, or match with 
external id") return None diff --git a/backend/program/content/plex_watchlist.py b/backend/program/content/plex_watchlist.py index 9abcbf67..81134f4b 100644 --- a/backend/program/content/plex_watchlist.py +++ b/backend/program/content/plex_watchlist.py @@ -1,7 +1,7 @@ """Plex Watchlist Module""" from typing import Optional from pydantic import BaseModel -from requests import ConnectTimeout +from requests import ConnectTimeout, HTTPError from utils.request import get, ping from utils.logger import logger from utils.settings import settings_manager @@ -29,22 +29,36 @@ def __init__(self, media_items: MediaItemContainer): self.media_items = media_items self.prev_count = 0 self.updater = Trakt() + self.not_found_ids = [] def validate_settings(self): if not self.settings.enabled: logger.debug("Plex Watchlists is set to disabled.") return False if self.settings.rss: + logger.info("Found Plex RSS URL. Validating...") try: - response = ping(self.settings.rss, timeout=15) + response = ping(self.settings.rss) if response.ok: self.rss_enabled = True + logger.info("Plex RSS URL is valid.") return True else: - logger.warn(f"Plex RSS URL is not reachable. Falling back to normal Watchlist.") + logger.info(f"Plex RSS URL is not valid. Falling back to watching user Watchlist.") return True - except Exception: - return False + except HTTPError as e: + if e.response.status_code in [404]: + logger.warn("Plex RSS URL is Not Found. Falling back to watching user Watchlist.") + return True + if e.response.status_code >= 400 and e.response.status_code <= 499: + logger.warn(f"Plex RSS URL is not reachable. Falling back to watching user Watchlist.") + return True + if e.response.status_code >= 500: + logger.error(f"Plex is having issues validating RSS feed. Falling back to watching user Watchlist.") + return True + except Exception as e: + logger.exception("Failed to validate Plex RSS URL: %s", e) + return True return True def run(self): @@ -52,6 +66,13 @@ def run(self): if they are not already there""" items = self._create_unique_list() new_items = [item for item in items if item not in self.media_items] or [] + if len(new_items) == 0: + logger.debug("No new items found in Plex Watchlist") + return + for check in new_items: + if check is None: + new_items.remove(check) + self.not_found_ids.append(check) container = self.updater.create_items(new_items) for item in container: item.set("requested_by", "Plex Watchlist") @@ -68,6 +89,9 @@ def run(self): logger.info("Added %s", item.log_string) elif length > 5: logger.info("Added %s items", length) + if len(self.not_found_ids) >= 1 and len(self.not_found_ids) <= 5: + for item in self.not_found_ids: + logger.info("Failed to add %s", item) def _create_unique_list(self): """Create a unique list of items from Plex RSS and Watchlist""" @@ -80,7 +104,7 @@ def _create_unique_list(self): def _get_items_from_rss(self) -> list: """Fetch media from Plex RSS Feed""" try: - response_obj = get(self.settings.rss, timeout=30) + response_obj = get(self.settings.rss, timeout=60) data = json.loads(response_obj.response.content) items = data.get("items", []) ids = [ diff --git a/backend/program/content/trakt.py b/backend/program/content/trakt.py new file mode 100644 index 00000000..86d057cd --- /dev/null +++ b/backend/program/content/trakt.py @@ -0,0 +1,72 @@ +"""Mdblist content module""" +from time import time +from typing import Optional +from pydantic import BaseModel +from utils.settings import settings_manager +from utils.logger import logger +from utils.request import get, ping +from 
+from program.updaters.trakt import Updater, CLIENT_ID
+
+
+class TraktConfig(BaseModel):
+    enabled: bool
+    watchlist: Optional[list]
+    collection: Optional[list]
+    user_lists: Optional[list]
+    api_key: Optional[str]
+    update_interval: int  # in seconds
+
+
+class Trakt:
+    """Content class for Trakt"""
+
+    def __init__(self, media_items: MediaItemContainer):
+        self.key = "trakt"
+        self.url = None
+        self.settings = TraktConfig(**settings_manager.get(f"content.{self.key}"))
+        self.headers = {"X-Api-Key": self.settings.api_key}
+        self.initialized = self.validate_settings()
+        if not self.initialized:
+            return
+        self.media_items = media_items
+        self.updater = Updater()
+        self.next_run_time = 0
+        logger.info("Trakt initialized!")
+
+    def validate_settings(self) -> bool:
+        """Validate Trakt settings."""
+        raise NotImplementedError
+
+    def run(self):
+        """Fetch media from Trakt and add them to media_items attribute."""
+        if time() < self.next_run_time:
+            return
+        self.next_run_time = time() + self.settings.update_interval
+        watchlist_items = self._get_items_from_trakt_watchlist(self.settings.watchlist)
+        collection_items = self._get_items_from_trakt_collections(self.settings.collection)
+        user_list_items = self._get_items_from_trakt_list(self.settings.user_lists)
+        items = list(set(watchlist_items + collection_items + user_list_items))
+        new_items = [item for item in items if item not in self.media_items]
+        container = self.updater.create_items(new_items)
+        for item in container:
+            item.set("requested_by", "Trakt")
+        added_items = self.media_items.extend(container)
+        length = len(added_items)
+        if length >= 1 and length <= 5:
+            for item in added_items:
+                logger.info("Added %s", item.log_string)
+        elif length > 5:
+            logger.info("Added %s items", length)
+
+    def _get_items_from_trakt_watchlist(self, watchlist_items: list) -> list:
+        """Get items from Trakt watchlist"""
+        raise NotImplementedError
+
+    def _get_items_from_trakt_collections(self, collection_items: list) -> list:
+        """Get items from Trakt collections"""
+        raise NotImplementedError
+
+    def _get_items_from_trakt_list(self, list_items: list) -> list:
+        """Get items from Trakt user list"""
+        raise NotImplementedError
diff --git a/backend/program/media/container.py b/backend/program/media/container.py
index 2d5d5933..266b8eb1 100644
--- a/backend/program/media/container.py
+++ b/backend/program/media/container.py
@@ -2,6 +2,7 @@
 import threading
 import dill
 from typing import List, Optional
+from utils.logger import logger
 from program.media.item import MediaItem
 
 
@@ -24,7 +25,12 @@ def __iadd__(self, other):
         return self
 
     def sort(self, by, reverse):
-        self.items.sort(key=lambda item: item.get(by), reverse=reverse)
+        """Sort container by given attribute"""
+        try:
+            self.items.sort(key=lambda item: item.get(by), reverse=reverse)
+        except AttributeError as e:
+            logger.error("Failed to sort container: %s", e)
 
     def __len__(self):
         """Get length of container"""
@@ -50,6 +56,13 @@ def get_item_by_id(self, itemid) -> MediaItem:
                 return my_item
         return None
 
+    def get_item_by_imdb_id(self, imdb_id) -> MediaItem:
+        """Get the item with the given IMDb ID from the container"""
+        for my_item in self.items:
+            if my_item.imdb_id == imdb_id:
+                return my_item
+        return None
+
     def get_item(self, attr, value) -> "MediaItemContainer":
         """Get items that match given items"""
         return next((item for item in self.items if getattr(item, attr) == value), None)
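`get_item_by_imdb_id` scans the whole container on every call, and the new `/imdb/{imdb_id}` endpoint will hit it once per request. A sketch of a dict-backed index — not the repo's `MediaItemContainer`, just the shape of the idea:

```python
# Sketch: an imdb_id -> item index gives O(1) lookups instead of a scan.
class IndexedContainer:
    def __init__(self):
        self.items = []
        self._by_imdb = {}

    def append(self, item):
        self.items.append(item)
        if getattr(item, "imdb_id", None):
            self._by_imdb[item.imdb_id] = item

    def get_item_by_imdb_id(self, imdb_id):
        return self._by_imdb.get(imdb_id)
```

One caveat if this were adopted: the container round-trips through dill, so the index would need to be rebuilt (or re-populated) after loading from the pickle file.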
diff --git a/backend/program/media/item.py b/backend/program/media/item.py
index 71f845ac..1ce55577 100644
--- a/backend/program/media/item.py
+++ b/backend/program/media/item.py
@@ -1,5 +1,5 @@
-from datetime import datetime
 import threading
+from datetime import datetime
 from program.media.state import (
     Unknown,
     Content,
@@ -27,6 +27,9 @@ def __init__(self, item):
         self.requested_by = item.get("requested_by", None)
         self.file = None
         self.folder = None
+        self.is_anime = False
+        self.parsed = False
+        self.parsed_data = item.get("parsed_data", [])
 
         # Media related
         self.title = item.get("title", None)
@@ -109,6 +112,18 @@ def to_extended_dict(self):
         dict["language"] = (self.language if hasattr(self, "language") else None,)
         dict["country"] = (self.country if hasattr(self, "country") else None,)
         dict["network"] = (self.network if hasattr(self, "network") else None,)
+        dict["active_stream"] = (
+            self.active_stream if hasattr(self, "active_stream") else None,
+        )
+        dict["symlinked"] = (self.symlinked if hasattr(self, "symlinked") else None,)
+        dict["parsed"] = (self.parsed if hasattr(self, "parsed") else None,)
+        dict["parsed_data"] = (self.parsed_data if hasattr(self, "parsed_data") else None,)
+        dict["is_anime"] = (self.is_anime if hasattr(self, "is_anime") else None,)
+        dict["update_folder"] = (
+            self.update_folder if hasattr(self, "update_folder") else None,
+        )
+        dict["file"] = (self.file if hasattr(self, "file") else None,)
+        dict["folder"] = (self.folder if hasattr(self, "folder") else None,)
         return dict
 
     def __iter__(self):
@@ -236,7 +251,8 @@ def __init__(self, item):
         super().__init__(item)
 
     def __eq__(self, other):
-        return self.number == other.number
+        return (type(self) == type(other) and self.parent == other.parent
+                and self.number == other.number)
 
     def __repr__(self):
         return f"Episode:{self.number}:{self.state.__class__.__name__}"
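One thing to keep in mind with the `Episode.__eq__` change: in Python 3, a class that defines `__eq__` without `__hash__` becomes unhashable, which matters if episodes are ever placed in sets or used as dict keys. A sketch pairing the two — attribute names mirror the diff; whether `MediaItem` already supplies a `__hash__` is not visible here:

```python
class Episode:
    """Sketch only, not the repo's class: __eq__ and __hash__ agree."""
    def __init__(self, parent, number):
        self.parent = parent
        self.number = number

    def __eq__(self, other):
        return (type(self) == type(other) and self.parent == other.parent
                and self.number == other.number)

    def __hash__(self):
        # Hash the same identity that __eq__ compares (assumes parent is hashable).
        return hash((self.parent, self.number))

assert Episode("show", 1) == Episode("show", 1)
assert len({Episode("show", 1), Episode("show", 1)}) == 1
```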
"seasonNumber", None) @@ -216,20 +225,17 @@ def _map_item_from_data(item): ) aired_at = getattr(item, "originallyAvailableAt", None) - # All movies have imdb, but not all shows do. - # This is due to season 0 (specials) not having imdb ids. # Attempt to get the imdb id from the tvdb id if we don't have it. - # Needs more testing.. + # Uses Trakt to get the imdb id from the tvdb id. # if not imdb_id: - # logger.debug("Unable to find imdb, trying tvdb for %s", title) # tvdb_id = next( # (guid.id.split("://")[-1] for guid in guids if "tvdb" in guid.id), None # ) # if tvdb_id: - # logger.debug("Unable to find imdb, but found tvdb: %s", tvdb_id) # imdb_id = get_imdbid_from_tvdb(tvdb_id) # if imdb_id: - # logger.debug("Found imdb from tvdb: %s", imdb_id) + # logger.debug("%s was missing IMDb ID, found IMDb ID from TVdb ID: %s", title, imdb_id) + # If we still don't have an imdb id, we could check TMdb or use external services like cinemeta. media_item_data = { "title": title, @@ -241,6 +247,7 @@ def _map_item_from_data(item): "guid": guid, "art_url": art_url, "file": file, + "is_anime": is_anime, } # Instantiate the appropriate subclass based on 'item_type' diff --git a/backend/program/realdebrid.py b/backend/program/realdebrid.py index fa033bd6..60617dde 100644 --- a/backend/program/realdebrid.py +++ b/backend/program/realdebrid.py @@ -130,6 +130,9 @@ def chunks(lst, n): "active_stream", {"hash": stream_hash, "files": wanted_files, "id": None}, ) + all_filenames = [file_info["filename"] for file_info in wanted_files.values()] + for file in all_filenames: + logger.debug(f"Found cached file {file} for {item.log_string}") return True item.streams[stream_hash] = None return False diff --git a/backend/program/scrapers/__init__.py b/backend/program/scrapers/__init__.py index ae87ac97..b9fb3f42 100644 --- a/backend/program/scrapers/__init__.py +++ b/backend/program/scrapers/__init__.py @@ -2,8 +2,8 @@ from pydantic import BaseModel from utils.service_manager import ServiceManager from utils.settings import settings_manager as settings +# from utils.parser import parser, sort_streams from utils.logger import logger -from utils.parser import parser from .torrentio import Torrentio from .orionoid import Orionoid from .jackett import Jackett @@ -19,7 +19,7 @@ def __init__(self, _): self.key = "scraping" self.initialized = False self.settings = ScrapingConfig(**settings.get(self.key)) - self.sm = ServiceManager(None, False, Torrentio, Orionoid, Jackett) + self.sm = ServiceManager(None, False, Orionoid, Torrentio, Jackett) if not any(service.initialized for service in self.sm.services): logger.error( "You have no scraping services enabled, please enable at least one!" 
diff --git a/backend/program/scrapers/jackett.py b/backend/program/scrapers/jackett.py
index 90063eb5..97ddea92 100644
--- a/backend/program/scrapers/jackett.py
+++ b/backend/program/scrapers/jackett.py
@@ -1,16 +1,18 @@
 """ Jackett scraper module """
+import traceback
 from typing import Optional
 from pydantic import BaseModel
 from requests import ReadTimeout, RequestException
 from utils.logger import logger
 from utils.settings import settings_manager
 from utils.parser import parser
-from utils.request import RateLimitExceeded, get, RateLimiter
+from utils.request import RateLimitExceeded, get, RateLimiter, ping
 
 
 class JackettConfig(BaseModel):
     enabled: bool
     url: Optional[str]
+    api_key: Optional[str]
 
 
 class Jackett:
@@ -21,10 +23,11 @@ def __init__(self, _):
         self.api_key = None
         self.settings = JackettConfig(**settings_manager.get(f"scraping.{self.key}"))
         self.initialized = self.validate_settings()
         if not self.initialized or not self.api_key:
             return
         self.minute_limiter = RateLimiter(max_calls=60, period=60, raise_on_limit=True)
-        self.second_limiter = RateLimiter(max_calls=1, period=3)
+        self.second_limiter = RateLimiter(max_calls=1, period=10)
+        self.parse_logging = False
         logger.info("Jackett initialized!")
 
     def validate_settings(self) -> bool:
@@ -32,6 +35,18 @@ def validate_settings(self) -> bool:
         if not self.settings.enabled:
             logger.debug("Jackett is set to disabled.")
             return False
+        if self.settings.url and self.settings.api_key:
+            self.api_key = self.settings.api_key
+            try:
+                url = f"{self.settings.url}/api/v2.0/indexers/!status:failing,test:passed/results/torznab?apikey={self.api_key}&cat=2000&t=movie&q=test"
+                response = ping(url=url, timeout=60)
+                if response.ok:
+                    return True
+            except ReadTimeout:
+                return True
+            except Exception as e:
+                logger.exception("Jackett failed to initialize with API Key: %s", e)
+                return False
         if self.settings.url:
             try:
                 url = f"{self.settings.url}/api/v2.0/server/config"
@@ -39,30 +54,42 @@ def validate_settings(self) -> bool:
                 if response.is_ok and response.data.api_key is not None:
                     self.api_key = response.data.api_key
                     return True
+                if not response.is_ok:
+                    return False
             except ReadTimeout:
                 return True
-            except Exception:
+            except Exception as e:
+                logger.exception("Jackett failed to initialize: %s", e)
                 return False
         logger.info("Jackett is not configured and will not be used.")
         return False
 
     def run(self, item):
         """Scrape Jackett for the given media items"""
+        if item is None or not self.initialized:
+            return
         try:
             self._scrape_item(item)
-        except RequestException:
+        except RateLimitExceeded:
+            self.minute_limiter.limit_hit()
+            logger.warn("Jackett rate limit hit for item: %s", item.log_string)
+            return
+        except RequestException as e:
             self.minute_limiter.limit_hit()
+            logger.exception("Jackett request exception: %s", e)
             return
-        except RateLimitExceeded:
+        except Exception:
             self.minute_limiter.limit_hit()
+            # logger.debug("Jackett exception for item: %s - Exception: %s", item.log_string, e.args[0], exc_info=True)
+            # logger.debug("Exception details: %s", traceback.format_exc())
             return
 
     def _scrape_item(self, item):
         """Scrape the given media item"""
-        data = self.api_scrape(item)
+        data, stream_count = self.api_scrape(item)
         if len(data) > 0:
             item.streams.update(data)
-            logger.debug("Found %s streams for %s", len(data), item.log_string)
+            logger.info("Found %s streams out of %s for %s", len(data), stream_count, item.log_string)
         else:
             logger.debug("Could not find streams for %s", item.log_string)
 
@@ -72,25 +99,32 @@ def api_scrape(self, item):
         with self.minute_limiter:
             query = ""
             if item.type == "movie":
-                query = f"&cat=2010,2020,2030,2040,2045,2050,2080&t=movie&q={item.title} {item.aired_at.year}"
+                query = f"&cat=2000,2010,2020,2030,2040,2045,2050,2080&t=movie&q={item.title}&year={item.aired_at.year}"
             if item.type == "season":
-                query = f"&cat=5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.title}&season={item.number}"
+                query = f"&cat=5000,5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.title}&season={item.number}"
             if item.type == "episode":
-                query = f"&cat=5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.parent.title}&season={item.parent.number}&ep={item.number}"
-            url = (f"{self.settings.url}/api/v2.0/indexers/!status:failing,test:passed/results/torznab?apikey={self.api_key}{query}")
+                query = f"&cat=5000,5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.parent.title}&season={item.parent.number}&ep={item.number}"
+            url = f"{self.settings.url}/api/v2.0/indexers/!status:failing,test:passed/results/torznab?apikey={self.api_key}{query}"
             with self.second_limiter:
                 response = get(url=url, retry_if_failed=False, timeout=60)
             if response.is_ok:
                 data = {}
-                for stream in response.data['rss']['channel'].get('item', []):
-                    title = stream.get('title')
-                    if parser.check_for_title_match(item, title):
-                        if parser.parse(title):
-                            attr = stream.get('torznab:attr', [])
-                            infohash_attr = next((a for a in attr if a.get('@name') == 'infohash'), None)
-                            if infohash_attr:
-                                infohash = infohash_attr.get('@value')
-                                data[infohash] = {"name": title}
-                if len(data) > 0:
-                    return parser.sort_streams(data)
-                return {}
+                streams = response.data["rss"]["channel"].get("item", [])
+                # Drop stray strings first so streams and parsed results stay zipped in step
+                streams = [s for s in streams if not isinstance(s, str)]
+                parsed_data_list = [parser.parse(item, stream.get("title")) for stream in streams]
+                for stream, parsed_data in zip(streams, parsed_data_list):
+                    if parsed_data.get("fetch", True) and parsed_data.get("title_match", False):
+                        attr = stream.get("torznab:attr", [])
+                        infohash_attr = next((a for a in attr if a.get("@name") == "infohash"), None)
+                        if infohash_attr:
+                            infohash = infohash_attr.get("@value")
+                            data[infohash] = {"name": stream.get("title")}
+                if self.parse_logging:
+                    for parsed_data in parsed_data_list:
+                        logger.debug("Jackett Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"])
+                if data:
+                    item.parsed_data.extend(parsed_data_list)
+                    return data, len(streams)
+            return {}, 0
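The corrected movie query above shows how easily a missing `=` slips into hand-concatenated torznab URLs. A sketch using `urllib.parse.urlencode` instead; parameter names match the calls above, while the indexer path segment is simplified to `all` for illustration:

```python
from urllib.parse import urlencode

def movie_query(base_url: str, api_key: str, title: str, year: int) -> str:
    """Build a torznab movie search URL; urlencode escapes the title too."""
    params = {
        "apikey": api_key,
        "cat": "2000,2010,2020,2030,2040,2045,2050,2080",
        "t": "movie",
        "q": title,
        "year": year,
    }
    return f"{base_url}/api/v2.0/indexers/all/results/torznab?{urlencode(params)}"

print(movie_query("http://localhost:9117", "KEY", "The Godfather", 1972))
```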
media item""" - data = self.api_scrape(item) + data, stream_count = self.api_scrape(item) if len(data) > 0: item.streams.update(data) - logger.debug("Found %s streams for %s", len(data), item.log_string) + logger.info("Found %s streams out of %s for %s", len(data), stream_count, item.log_string) else: logger.debug("Could not find streams for %s", item.log_string) @@ -72,25 +99,32 @@ def api_scrape(self, item): with self.minute_limiter: query = "" if item.type == "movie": - query = f"&cat=2010,2020,2030,2040,2045,2050,2080&t=movie&q={item.title} {item.aired_at.year}" + query = f"&cat=2000,2010,2020,2030,2040,2045,2050,2080&t=movie&q={item.title}&year{item.aired_at.year}" if item.type == "season": - query = f"&cat=5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.title}&season={item.number}" + query = f"&cat=5000,5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.title}&season={item.number}" if item.type == "episode": - query = f"&cat=5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.parent.title}&season={item.parent.number}&ep={item.number}" - url = (f"{self.settings.url}/api/v2.0/indexers/!status:failing,test:passed/results/torznab?apikey={self.api_key}{query}") + query = f"&cat=5000,5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.parent.title}&season={item.parent.number}&ep={item.number}" + url = f"{self.settings.url}/api/v2.0/indexers/!status:failing,test:passed/results/torznab?apikey={self.api_key}{query}" with self.second_limiter: response = get(url=url, retry_if_failed=False, timeout=60) if response.is_ok: data = {} - for stream in response.data['rss']['channel'].get('item', []): - title = stream.get('title') - if parser.check_for_title_match(item, title): - if parser.parse(title): - attr = stream.get('torznab:attr', []) - infohash_attr = next((a for a in attr if a.get('@name') == 'infohash'), None) - if infohash_attr: - infohash = infohash_attr.get('@value') - data[infohash] = {"name": title} - if len(data) > 0: - return parser.sort_streams(data) - return {} + streams = response.data["rss"]["channel"].get("item", []) + parsed_data_list = [parser.parse(item, stream.get("title")) for stream in streams if type(stream) != str] + for stream, parsed_data in zip(streams, parsed_data_list): + if type(stream) == str: + logger.debug("Found another string: %s", stream) + continue + if parsed_data.get("fetch", True) and parsed_data.get("title_match", False): + attr = stream.get("torznab:attr", []) + infohash_attr = next((a for a in attr if a.get("@name") == "infohash"), None) + if infohash_attr: + infohash = infohash_attr.get("@value") + data[infohash] = {"name": stream.get("title")} + if self.parse_logging: + for parsed_data in parsed_data_list: + logger.debug("Jackett Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) + if data: + item.parsed_data.extend(parsed_data_list) + return data, len(streams) + return {}, 0 diff --git a/backend/program/scrapers/orionoid.py b/backend/program/scrapers/orionoid.py index 98d0d02b..2b6b18d4 100644 --- a/backend/program/scrapers/orionoid.py +++ b/backend/program/scrapers/orionoid.py @@ -1,6 +1,7 @@ """ Orionoid scraper module """ from typing import Optional from pydantic import BaseModel +from requests import ConnectTimeout from requests.exceptions import RequestException from utils.logger import logger from utils.request import RateLimitExceeded, RateLimiter, get @@ -28,11 +29,13 @@ def __init__(self, _): self.initialized = True else: return - 
-        self.max_calls = 50 if not self.is_premium else 999999
-        self.minute_limiter = RateLimiter(
-            max_calls=self.max_calls, period=86400, raise_on_limit=True
-        )
-        self.second_limiter = RateLimiter(max_calls=1, period=1)
+        self.orionoid_limit = 0
+        self.orionoid_remaining = 0
+        self.parse_logging = False
+        self.max_calls = 100 if not self.is_premium else 60
+        self.period = 86400 if not self.is_premium else 60
+        self.minute_limiter = RateLimiter(max_calls=self.max_calls, period=self.period, raise_on_limit=True)
+        self.second_limiter = RateLimiter(max_calls=1, period=10)
         logger.info("Orionoid initialized!")
 
     def validate_settings(self) -> bool:
@@ -42,8 +45,20 @@ def validate_settings(self) -> bool:
             return False
         if self.settings.api_key:
-            return True
-        logger.info("Orionoid is not configured and will not be used.")
-        return False
+            try:
+                url = f"https://api.orionoid.com?keyapp={KEY_APP}&keyuser={self.settings.api_key}&mode=user&action=retrieve"
+                response = get(url, retry_if_failed=False)
+                if not response.is_ok:
+                    logger.error(f"Orionoid Status Code: {response.status_code}, Reason: {response.reason}")
+                    return False
+                if response.data.result.status != "success":
+                    logger.error(f"Orionoid API Key is invalid. Status: {response.data.result.status}")
+                    return False
+                return True
+            except Exception as e:
+                logger.exception("Orionoid failed to initialize: %s", e)
+                return False
+        logger.info("Orionoid is not configured and will not be used.")
+        return False
 
     def check_premium(self) -> bool:
         """
@@ -66,20 +81,32 @@ def check_premium(self) -> bool:
 
     def run(self, item):
         """Scrape the Orionoid site for the given media items
         and update the object with scraped streams"""
+        if item is None or not self.initialized:
+            return
         try:
             self._scrape_item(item)
-        except RequestException:
+        except ConnectTimeout:
             self.minute_limiter.limit_hit()
+            logger.warn("Orionoid connection timeout for item: %s", item.log_string)
+            return
+        except RequestException as e:
+            self.minute_limiter.limit_hit()
+            logger.exception("Orionoid request exception: %s", e)
             return
         except RateLimitExceeded:
             self.minute_limiter.limit_hit()
+            logger.warn("Orionoid rate limit hit for item: %s", item.log_string)
+            return
+        except Exception as e:
+            self.minute_limiter.limit_hit()
+            logger.exception("Orionoid exception for item: %s - Exception: %s", item.log_string, e)
             return
 
     def _scrape_item(self, item):
-        data = self.api_scrape(item)
+        data, stream_count = self.api_scrape(item)
         if len(data) > 0:
             item.streams.update(data)
-            logger.debug("Found %s streams for %s", len(data), item.log_string)
+            logger.info("Found %s streams out of %s for %s", len(data), stream_count, item.log_string)
         else:
             logger.debug("Could not find streams for %s", item.log_string)
 
@@ -124,12 +151,28 @@ def api_scrape(self, item):
         with self.second_limiter:
             response = get(url, retry_if_failed=False, timeout=60)
-        if response.is_ok and response.data.result.status != "error":
-            data = {}
-            for stream in response.data.data.streams:
-                title = stream.file.name
-                if parser.parse(title) and stream.file.hash:
-                    data[stream.file.hash] = {"name": title}
-            if len(data) > 0:
-                return parser.sort_streams(data)
-            return {}
+        if response.is_ok and hasattr(response.data, "data"):
+
+            # Check and log Orionoid API limits
+            # self.orionoid_limit = response.data.data.requests.daily.limit
+            # self.orionoid_remaining = response.data.data.requests.daily.remaining
+            # if self.orionoid_remaining < 10:
+            #     logger.warning(f"Orionoid API limit is low. Limit: {self.orionoid_limit}, Remaining: {self.orionoid_remaining}")
+
+            # Keep only streams with a hash so the zip below stays aligned
+            streams = [stream for stream in response.data.data.streams if stream.file.hash]
+            parsed_data_list = [parser.parse(item, stream.file.name) for stream in streams]
+            data = {
+                stream.file.hash: {"name": stream.file.name}
+                for stream, parsed_data in zip(streams, parsed_data_list)
+                if parsed_data["fetch"]
+            }
+            if self.parse_logging:
+                for parsed_data in parsed_data_list:
+                    logger.debug("Orionoid Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"])
+            if data:
+                item.parsed_data.extend(parsed_data_list)
+                return data, len(response.data.data.streams)
+        return {}, 0
\ No newline at end of file
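Both scrapers now lean on a minute-level and a per-request `RateLimiter` from `utils.request`, whose implementation is not part of this diff. A generic sketch of the same idea — a blocking sliding-window limiter usable as a context manager:

```python
import threading
import time

class SimpleRateLimiter:
    """Sketch only: block until a slot inside the window frees up."""
    def __init__(self, max_calls: int, period: float):
        self.max_calls, self.period = max_calls, period
        self.calls = []
        self.lock = threading.Lock()

    def __enter__(self):
        while True:
            with self.lock:
                now = time.monotonic()
                # Forget calls that fell out of the window.
                self.calls = [t for t in self.calls if now - t < self.period]
                if len(self.calls) < self.max_calls:
                    self.calls.append(now)
                    return self
            time.sleep(0.05)

    def __exit__(self, *exc):
        return False

limiter = SimpleRateLimiter(max_calls=1, period=10)  # mirrors second_limiter above
with limiter:
    pass  # one request per 10 seconds
```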
diff --git a/backend/program/scrapers/torrentio.py b/backend/program/scrapers/torrentio.py
index 97195852..454460f2 100644
--- a/backend/program/scrapers/torrentio.py
+++ b/backend/program/scrapers/torrentio.py
@@ -1,15 +1,18 @@
 """ Torrentio scraper module """
+import os
 from typing import Optional
 from pydantic import BaseModel
+from requests import ConnectTimeout, ReadTimeout
 from requests.exceptions import RequestException
 from utils.logger import logger
-from utils.request import RateLimitExceeded, get, RateLimiter
+from utils.request import RateLimitExceeded, get, RateLimiter, ping
 from utils.settings import settings_manager
 from utils.parser import parser
 
 
 class TorrentioConfig(BaseModel):
     enabled: bool
+    url: Optional[str]
     filter: Optional[str]
 
 
@@ -20,10 +23,11 @@ def __init__(self, _):
         self.key = "torrentio"
         self.settings = TorrentioConfig(**settings_manager.get(f"scraping.{self.key}"))
         self.minute_limiter = RateLimiter(max_calls=60, period=60, raise_on_limit=True)
-        self.second_limiter = RateLimiter(max_calls=1, period=3)
+        self.second_limiter = RateLimiter(max_calls=1, period=10)
         self.initialized = self.validate_settings()
         if not self.initialized:
             return
+        self.parse_logging = False
         logger.info("Torrentio initialized!")
 
     def validate_settings(self) -> bool:
@@ -31,27 +35,59 @@ def validate_settings(self) -> bool:
         if not self.settings.enabled:
             logger.debug("Torrentio is set to disabled.")
             return False
+        if not self.settings.url:
+            logger.error("Torrentio URL is not configured and will not be used.")
+            return False
+        try:
+            url = f"{self.settings.url}/{self.settings.filter}/stream/movie/tt0068646.json"
+            response = ping(url=url, timeout=10)
+            if response.ok:
+                return True
+        except Exception as e:
+            logger.exception("Torrentio failed to initialize: %s", e)
+            return False
         return True
 
-    def run(self, item) -> None:
+    def run(self, item):
         """Scrape the torrentio site for the given media items
         and update the object with scraped streams"""
+        if item is None or not self.initialized:
+            return
         try:
             self._scrape_item(item)
-        except RequestException:
+        except RateLimitExceeded:
             self.minute_limiter.limit_hit()
+            logger.warn("Torrentio rate limit hit for item: %s", item.log_string)
             return
-        except RateLimitExceeded:
+        except ConnectTimeout:
+            self.minute_limiter.limit_hit()
+            logger.warn("Torrentio connection timeout for item: %s", item.log_string)
+            return
+        except ReadTimeout:
+            self.minute_limiter.limit_hit()
+            return
+        except RequestException as e:
+            self.minute_limiter.limit_hit()
+            logger.warn("Torrentio request exception: %s", e)
+            return
+        except AttributeError:
+            # TODO: will fix later
+            self.minute_limiter.limit_hit()
+            return
+        except Exception as e:
             self.minute_limiter.limit_hit()
+            logger.warn("Torrentio failed to scrape item: %s", e)
             return
 
     def _scrape_item(self, item):
-        data = self.api_scrape(item)
+        """Scrape torrentio for the given media item"""
+        data, stream_count = self.api_scrape(item)
         if len(data) > 0:
             item.streams.update(data)
-            logger.debug("Found %s streams for %s", len(data), item.log_string)
+            logger.info("Found %s streams out of %s for %s", len(data), stream_count, item.log_string)
         else:
-            logger.debug("Could not find streams for %s", item.log_string)
+            if stream_count > 0:
+                logger.debug("Could not find good streams for %s out of %s", item.log_string, stream_count)
 
     def api_scrape(self, item):
         """Wrapper for torrentio scrape method"""
@@ -70,23 +106,26 @@ def api_scrape(self, item):
                 imdb_id = item.imdb_id
 
             url = (
-                f"https://torrentio.strem.fun/{self.settings.filter}"
+                f"{self.settings.url}/{self.settings.filter}"
                 + f"/stream/{scrape_type}/{imdb_id}"
             )
             if identifier:
-                url += f"{identifier}"
+                url += identifier
             with self.second_limiter:
-                response = get(f"{url}.json", retry_if_failed=False, timeout=30)
-            if response.is_ok:
-                data = {}
-                if len(response.data.streams) == 0:
-                    return data
-                for stream in response.data.streams:
-                    title = stream.title.split("\n👤")[0]
-                    if parser.parse(title):
-                        data[stream.infoHash] = {
-                            "name": title,
-                        }
-                if len(data) > 0:
-                    return parser.sort_streams(data)
-            return {}
+                response = get(f"{url}.json", retry_if_failed=False, timeout=60)
+            if response.is_ok and len(response.data.streams) > 0:
+                parsed_data_list = [
+                    parser.parse(item, stream.title.split("\n👤")[0].split("\n")[0]) for stream in response.data.streams
+                ]
+                data = {
+                    stream.infoHash: {"name": stream.title.split("\n👤")[0].split("\n")[0]}
+                    for stream, parsed_data in zip(response.data.streams, parsed_data_list)
+                    if parsed_data.get("fetch", False) and parsed_data.get("string", False)
+                }
+                if self.parse_logging:
+                    for parsed_data in parsed_data_list:
+                        logger.debug("Torrentio Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"])
+                if data:
+                    item.parsed_data.extend(parsed_data_list)
+                    return data, len(response.data.streams)
+            return {}, 0
diff --git a/backend/program/symlink.py b/backend/program/symlink.py
index 62e5d167..f3610ef3 100644
--- a/backend/program/symlink.py
+++ b/backend/program/symlink.py
@@ -1,5 +1,6 @@
 """Symlinking module"""
 import os
+from pathlib import Path
 from typing import Optional
 from pydantic import BaseModel
 from utils.settings import settings_manager as settings
@@ -7,8 +8,8 @@
 
 
 class SymlinkConfig(BaseModel):
-    host_path: Optional[str]
-    container_path: Optional[str]
+    host_path: Path
+    container_path: Path
 
 
 class Symlinker():
@@ -23,40 +24,70 @@ class Symlinker():
         host_path (str): The absolute path of the host mount.
         symlink_path (str): The path where the symlinks will be created.
     """
-
     def __init__(self, _):
         self.key = "symlink"
         self.settings = SymlinkConfig(**settings.get(self.key))
         self.initialized = False
-        self.library_path = os.path.join(
-            os.path.dirname(self.settings.host_path), "library"
-        )
-        self.library_path_movies = os.path.join(self.library_path, "movies")
-        self.library_path_shows = os.path.join(self.library_path, "shows")
+
+        if (self.settings.host_path / "__all__").exists():
+            logger.debug("Detected Zurg host path. Using __all__ folder for host path.")
+            settings.set(self.key, self.settings.host_path)
+            self.settings.host_path = Path(self.settings.host_path) / "__all__"
+        elif (self.settings.host_path / "torrents").exists():
+            logger.debug("Detected standard rclone host path. Using torrents folder for host path.")
+            settings.set(self.key, self.settings.host_path)
+            self.settings.host_path = Path(self.settings.host_path) / "torrents"
+
+        self.library_path = self.settings.host_path.parent / "library"
+
         if not self.validate():
-            logger.error("Symlink is not configured and will not be used.")
+            logger.error("Symlink configuration is invalid. Please check the host and container paths.")
+            return
+
+        self.initialize_library_paths()
+
+        if not self.create_initial_folders():
+            logger.error("Failed to create initial library folders.")
             return
-        self._create_init_folders()
+
+        logger.info("Found rclone mount path: %s", self.settings.host_path)
+        logger.info("Symlinks will be placed in library path: %s", self.library_path)
+        logger.info("Plex will see the symlinks in: %s", self.settings.container_path.parent / "library")
         logger.info("Symlink initialized!")
         self.initialized = True
 
     def validate(self):
-        return os.path.exists(self.settings.host_path) and self.settings.host_path != "" and self.settings.container_path != ""
-
-    def _create_init_folders(self):
-        movies = os.path.join(self.library_path_movies)
-        shows = os.path.join(self.library_path_shows)
-        if not os.path.exists(self.library_path):
-            os.mkdir(self.library_path)
-        if not os.path.exists(movies):
-            os.mkdir(movies)
-        if not os.path.exists(shows):
-            os.mkdir(shows)
+        if not self.settings.host_path or not self.settings.container_path:
+            return False
+        host_path = Path(self.settings.host_path)
+        if not host_path.exists() or not host_path.is_dir():
+            logger.error(f"Invalid host path: {self.settings.host_path}")
+            return False
+        return True
+
+    def initialize_library_paths(self):
+        self.library_path_movies = self.library_path / "movies"
+        self.library_path_shows = self.library_path / "shows"
+        self.library_path_anime_movies = self.library_path / "anime_movies"
+        self.library_path_anime_shows = self.library_path / "anime_shows"
+
+    def create_initial_folders(self):
+        for library in [self.library_path_movies,
+                        self.library_path_shows,
+                        self.library_path_anime_movies,
+                        self.library_path_anime_shows]:
+            try:
+                library.mkdir(parents=True, exist_ok=True)
+            except Exception as e:
+                logger.error("Failed to create directory %s: %s", library, e)
+                return False
+        return True
 
     def run(self, item):
         self._run(item)
 
     def _determine_file_name(self, item):
+        """Determine the filename of the symlink."""
         filename = None
         if item.type == "movie":
             filename = (
@@ -77,6 +108,7 @@ def _determine_file_name(self, item):
         return filename
 
     def _run(self, item):
+        """Check if the media item exists and create a symlink if it does"""
         found = False
         if os.path.exists(os.path.join(self.settings.host_path, item.folder, item.file)):
             found = True
@@ -90,6 +122,7 @@ def _run(self, item):
             self._symlink(item)
 
     def _symlink(self, item):
+        """Create a symlink for the given media item"""
         extension = item.file.split(".")[-1]
         symlink_filename = f"{self._determine_file_name(item)}.{extension}"
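`_symlink`'s body is unchanged by this hunk, so it is not shown; for reference, a create-or-replace sketch that keeps repeated `_run()` calls idempotent under the new pathlib layout (the helper name and demo paths are illustrative, not the repo's):

```python
import os
import tempfile
from pathlib import Path

def place_symlink(source: Path, destination: Path) -> None:
    """Create-or-replace a symlink so re-runs don't fail on an existing link."""
    destination.parent.mkdir(parents=True, exist_ok=True)
    if destination.is_symlink() or destination.exists():
        destination.unlink()
    os.symlink(source, destination)

# Demo in a throwaway directory rather than a real mount:
with tempfile.TemporaryDirectory() as tmp:
    src = Path(tmp) / "Movie.mkv"
    src.touch()
    place_symlink(src, Path(tmp) / "library" / "movies" / "Movie (2020).mkv")
    place_symlink(src, Path(tmp) / "library" / "movies" / "Movie (2020).mkv")  # no error
```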
diff --git a/backend/program/updaters/trakt.py b/backend/program/updaters/trakt.py
index 10a61b2d..8009fa60 100644
--- a/backend/program/updaters/trakt.py
+++ b/backend/program/updaters/trakt.py
@@ -21,6 +21,9 @@ def __init__(self):
 
     def create_items(self, imdb_ids):
         """Update media items to state where they can start downloading"""
+        if len(imdb_ids) == 0:
+            return MediaItemContainer()
+
         self.trakt_data.load(self.pkl_file)
         new_items = MediaItemContainer()
         get_items = MediaItemContainer()
@@ -28,12 +31,12 @@ def create_items(self, imdb_ids):
         existing_imdb_ids = {item.imdb_id for item in self.trakt_data.items if item}
 
         # This is to calculate 10% batch sizes to speed up the process
-        batch_size = math.ceil(len(imdb_ids) * 0.1)
+        batch_size = math.ceil(len(imdb_ids) * 0.1) or 1
         imdb_id_batches = [imdb_ids[i:i + batch_size] for i in range(0, len(imdb_ids), batch_size)]
 
         with concurrent.futures.ThreadPoolExecutor() as executor:
             for imdb_id_batch in imdb_id_batches:
-                future_items = {executor.submit(self._create_item, imdb_id): imdb_id for imdb_id in imdb_id_batch if imdb_id not in existing_imdb_ids}
+                future_items = {executor.submit(self._create_item, imdb_id): imdb_id for imdb_id in imdb_id_batch if imdb_id is not None and imdb_id not in existing_imdb_ids}
                 for future in concurrent.futures.as_completed(future_items):
                     item = future.result()
                     if item:
@@ -54,11 +57,12 @@ def create_items(self, imdb_ids):
         if length > 0:
             self.trakt_data.extend(added_items)
             self.trakt_data.save(self.pkl_file)
-
         return get_items
 
     def _create_item(self, imdb_id):
         item = create_item_from_imdb_id(imdb_id)
+        if item is None:
+            return None
         if item and item.type == "show":
             seasons = get_show(imdb_id)
             for season in seasons:
@@ -73,6 +77,9 @@ def _create_item(self, imdb_id):
 
 def _map_item_from_data(data, item_type):
     """Map trakt.tv API data to MediaItemContainer"""
+    if item_type not in ["movie", "show", "season", "episode"]:
+        logger.debug("Unknown item type %s for %s not found in list of acceptable objects", item_type, data.title)
+        return None
     formatted_aired_at = None
     if getattr(data, "first_aired", None) and (
         item_type == "show"
@@ -89,8 +96,7 @@ def _map_item_from_data(data, item_type):
         "title": getattr(data, "title", None),  # 'Game of Thrones'
         "year": getattr(data, "year", None),  # 2011
         "status": getattr(data, "status", None),  # 'ended', 'released', 'returning series'
-        "aired_at": formatted_aired_at,  # datetime.datetime(2011, 4, 17, 0, 0)
-        "is_anime": is_anime,  # True
+        "aired_at": formatted_aired_at,  # datetime.datetime(2011, 4, 17, 0, 0)
         "imdb_id": getattr(data.ids, "imdb", None),  # 'tt0496424'
         "tvdb_id": getattr(data.ids, "tvdb", None),  # 79488
         "tmdb_id": getattr(data.ids, "tmdb", None),  # 1399
@@ -100,10 +106,13 @@ def _map_item_from_data(data, item_type):
         "language": getattr(data, "language", None),  # 'en'
         "requested_at": datetime.now(),  # datetime.datetime(2021, 4, 17, 0, 0)
     }
+
     match item_type:
         case "movie":
+            item["is_anime"] = is_anime
             return_item = Movie(item)
         case "show":
+            item["is_anime"] = is_anime
             return_item = Show(item)
         case "season":
             item["number"] = getattr(data, "number")
@@ -112,6 +121,7 @@ def _map_item_from_data(data, item_type):
             item["number"] = getattr(data, "number")
             return_item = Episode(item)
         case _:
+            logger.debug("Unknown item type %s for %s", item_type, data.title)
             return_item = None
     return return_item
@@ -132,21 +142,32 @@ def get_show(imdb_id: str):
 
 def create_item_from_imdb_id(imdb_id: str):
     """Wrapper for trakt.tv API search method"""
+    if imdb_id is None:
+        logger.debug("Unable to create item from IMDb ID. No IMDb ID provided.")
+        return
     url = f"https://api.trakt.tv/search/imdb/{imdb_id}?extended=full"
     response = get(
         url,
         additional_headers={"trakt-api-version": "2", "trakt-api-key": CLIENT_ID},
     )
-    if response.is_ok:
-        if len(response.data) > 0:
+    if response.is_ok and len(response.data) > 0:
+        try:
             media_type = response.data[0].type
             if media_type == "movie":
                 data = response.data[0].movie
-            else:
+            elif media_type == "show":
                 data = response.data[0].show
+            elif media_type == "season":
+                data = response.data[0].season
+            elif media_type == "episode":
+                data = response.data[0].episode
             if data:
                 return _map_item_from_data(data, media_type)
-    return None
+        except UnboundLocalError:
+            logger.error("Unknown item %s with response %s", imdb_id, response)
+            return
+    logger.error("Unable to create item from IMDb ID %s", imdb_id)
+    return
 
 
 def get_imdbid_from_tvdb(tvdb_id: str) -> str:
     """Get IMDb ID from TVDB ID in Trakt"""
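For clarity, the batching above in isolation, with a stub in place of `_create_item`; the comprehension guard reflects the `and` fix applied above (skip both already-known IDs and `None`):

```python
import concurrent.futures
import math

def create_item(imdb_id):  # stub standing in for _create_item
    return {"imdb_id": imdb_id}

def create_items(imdb_ids, existing=frozenset()):
    results = []
    batch_size = math.ceil(len(imdb_ids) * 0.1) or 1  # never zero
    batches = [imdb_ids[i:i + batch_size] for i in range(0, len(imdb_ids), batch_size)]
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for batch in batches:
            futures = {
                executor.submit(create_item, i): i
                for i in batch
                if i is not None and i not in existing
            }
            for future in concurrent.futures.as_completed(futures):
                item = future.result()
                if item:
                    results.append(item)
    return results

print(len(create_items([f"tt{i:07d}" for i in range(25)])))  # 25
```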
No IMDb ID provided.") + return url = f"https://api.trakt.tv/search/imdb/{imdb_id}?extended=full" response = get( url, additional_headers={"trakt-api-version": "2", "trakt-api-key": CLIENT_ID}, ) - if response.is_ok: - if len(response.data) > 0: + if response.is_ok and len(response.data) > 0: + try: media_type = response.data[0].type if media_type == "movie": data = response.data[0].movie - else: + elif media_type == "show": data = response.data[0].show + elif media_type == "season": + data = response.data[0].season + elif media_type == "episode": + data = response.data[0].episode if data: return _map_item_from_data(data, media_type) - return None + except UnboundLocalError: + logger.error("Unknown item %s with response %s", imdb_id, response) + return + logger.error("Unable to create item from IMDb ID %s", imdb_id) + return def get_imdbid_from_tvdb(tvdb_id: str) -> str: """Get IMDb ID from TVDB ID in Trakt""" diff --git a/backend/pytest.ini b/backend/pytest.ini index a82a97bc..2375392f 100644 --- a/backend/pytest.ini +++ b/backend/pytest.ini @@ -1,6 +1,6 @@ [pytest] minversion = 7.0 -; addopts = --cov=tests/. +addopts = -vv -W ignore::DeprecationWarning pythonpath = . testpaths = tests \ No newline at end of file diff --git a/backend/tests/items_test.py b/backend/tests/test_items.py similarity index 92% rename from backend/tests/items_test.py rename to backend/tests/test_items.py index a1dafd0d..ea76c37f 100644 --- a/backend/tests/items_test.py +++ b/backend/tests/test_items.py @@ -18,7 +18,7 @@ def test_get_states(): assert response.status_code == 200 assert response.json() == { "success": True, - "states": [state for state in MediaItemStates], + "states": [state.value for state in MediaItemStates], } diff --git a/backend/tests/test_parser.py b/backend/tests/test_parser.py new file mode 100644 index 00000000..c745be0d --- /dev/null +++ b/backend/tests/test_parser.py @@ -0,0 +1,54 @@ +import pytest +from utils.parser import Parser + + +@pytest.fixture +def parser(): + return Parser() + +# Test parser +def test_fetch_with_movie(parser): + # Use mocked movie item in parser test + parsed_data = parser.parse(item=None, string="Inception 2010 1080p BluRay x264") + assert parsed_data["fetch"] == True + # Add more assertions as needed + +def test_fetch_with_episode(parser): + # Use mocked episode item in parser test + parsed_data = parser.parse(item=None, string="Breaking Bad S01E01 720p BluRay x264") + assert parsed_data["fetch"] == True + # Add more assertions as needed + +def test_parse_resolution_4k(parser): + parsed_data = parser.parse(item=None, string="Movie.Name.2018.2160p.UHD.BluRay.x265") + assert parsed_data["is_4k"] == True + assert parsed_data["resolution"] == "2160p" + +def test_parse_resolution_1080p(parser): + parsed_data = parser.parse(item=None, string="Another.Movie.2019.1080p.WEB-DL.x264") + assert parsed_data["is_4k"] == False + assert parsed_data["resolution"] == "1080p" + +def test_parse_dual_audio_present(parser): + parsed_data = parser.parse(item=None, string="Series S01E01 720p BluRay x264 Dual-Audio") + assert parsed_data["is_dual_audio"] == True + +def test_parse_dual_audio_absent(parser): + parsed_data = parser.parse(item=None, string="Series S01E02 720p BluRay x264") + assert parsed_data["is_dual_audio"] == False + +def test_parse_complete_series_detected(parser): + parsed_data = parser.parse(item=None, string="The Complete Series Box Set 1080p") + assert parsed_data["is_complete"] == True + +def test_parse_complete_series_not_detected(parser): + parsed_data = 
diff --git a/backend/tests/test_parser.py b/backend/tests/test_parser.py
new file mode 100644
index 00000000..c745be0d
--- /dev/null
+++ b/backend/tests/test_parser.py
@@ -0,0 +1,54 @@
+import pytest
+from utils.parser import Parser
+
+
+@pytest.fixture
+def parser():
+    return Parser()
+
+# Test parser
+def test_fetch_with_movie(parser):
+    # Use mocked movie item in parser test
+    parsed_data = parser.parse(item=None, string="Inception 2010 1080p BluRay x264")
+    assert parsed_data["fetch"] == True
+    # Add more assertions as needed
+
+def test_fetch_with_episode(parser):
+    # Use mocked episode item in parser test
+    parsed_data = parser.parse(item=None, string="Breaking Bad S01E01 720p BluRay x264")
+    assert parsed_data["fetch"] == True
+    # Add more assertions as needed
+
+def test_parse_resolution_4k(parser):
+    parsed_data = parser.parse(item=None, string="Movie.Name.2018.2160p.UHD.BluRay.x265")
+    assert parsed_data["is_4k"] == True
+    assert parsed_data["resolution"] == "2160p"
+
+def test_parse_resolution_1080p(parser):
+    parsed_data = parser.parse(item=None, string="Another.Movie.2019.1080p.WEB-DL.x264")
+    assert parsed_data["is_4k"] == False
+    assert parsed_data["resolution"] == "1080p"
+
+def test_parse_dual_audio_present(parser):
+    parsed_data = parser.parse(item=None, string="Series S01E01 720p BluRay x264 Dual-Audio")
+    assert parsed_data["is_dual_audio"] == True
+
+def test_parse_dual_audio_absent(parser):
+    parsed_data = parser.parse(item=None, string="Series S01E02 720p BluRay x264")
+    assert parsed_data["is_dual_audio"] == False
+
+def test_parse_complete_series_detected(parser):
+    parsed_data = parser.parse(item=None, string="The Complete Series Box Set 1080p")
+    assert parsed_data["is_complete"] == True
+
+def test_parse_complete_series_not_detected(parser):
+    parsed_data = parser.parse(item=None, string="Single.Movie.2020.1080p.BluRay")
+    assert parsed_data["is_complete"] == False
+
+def test_parse_unwanted_quality_detected(parser):
+    parsed_data = parser.parse(item=None, string="Low.Quality.Movie.CAM.2020")
+    assert parsed_data["is_unwanted_quality"] == True
+
+def test_parse_unwanted_quality_not_detected(parser):
+    parsed_data = parser.parse(item=None, string="High.Quality.Movie.1080p.2020")
+    assert parsed_data["is_unwanted_quality"] == False
diff --git a/backend/tests/test_torrentio.py b/backend/tests/test_torrentio.py
deleted file mode 100644
index 74c2f256..00000000
--- a/backend/tests/test_torrentio.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import pytest
-from unittest.mock import Mock
-from program.scrapers.torrentio import Torrentio, TorrentioConfig
-
-
-def test_torrentio_initialization():
-    torrentio = Torrentio(None)
-    assert torrentio is not None
-    assert torrentio.key == "torrentio"
-
-@pytest.mark.parametrize("enabled,filter,expected", [
-    (True, "sort=qusize%7Cqualityfilter=480p,scr,cam,unknown", True),
-    (False, None, False),
-])
-def test_validate_settings(enabled, filter, expected):
-    settings = TorrentioConfig(enabled=enabled, filter=filter)
-    torrentio = Torrentio(None)
-    torrentio.settings = settings
-    assert torrentio.validate_settings() == expected
-
-def test_api_scrape_basic():
-    torrentio = Torrentio(None)
-    torrentio.settings = TorrentioConfig(enabled=True, filter="sort=qualitysize%7Cqualityfilter=480p,scr,cam,unknown")
-    item = Mock()  # TODO: Create a better mock item
-    result = torrentio.api_scrape(item)
-    assert result is not None
-    # Need to add more.. but this is a start
\ No newline at end of file
diff --git a/backend/utils/default_settings.json b/backend/utils/default_settings.json
index 2f13f05f..49551b00 100644
--- a/backend/utils/default_settings.json
+++ b/backend/utils/default_settings.json
@@ -1,5 +1,5 @@
 {
-    "version": "0.4.3",
+    "version": "0.4.5",
     "debug": true,
     "log": true,
     "symlink": {
@@ -33,7 +33,7 @@
         "update_interval": 80
     },
     "overseerr": {
-        "enabled": true,
+        "enabled": false,
         "url": "http://localhost:5055",
         "api_key": ""
     }
@@ -43,8 +43,9 @@
     "after_5": 2,
     "after_10": 24,
     "torrentio": {
-        "enabled": true,
-        "filter": "sort=qualitysize%7Cqualityfilter=480p,scr,cam,unknown"
+        "enabled": false,
+        "url": "https://torrentio.strem.fun",
+        "filter": "sort=qualitysize%7Cqualityfilter=480p,scr,cam"
     },
     "orionoid": {
         "enabled": false,
@@ -52,7 +53,8 @@
     "jackett": {
         "enabled": false,
-        "url": "http://localhost:9117"
+        "url": "http://localhost:9117",
+        "api_key": ""
     }
 },
 "parser": {
diff --git a/backend/utils/parser.py b/backend/utils/parser.py
index 0e48331d..ba932fa3 100644
--- a/backend/utils/parser.py
+++ b/backend/utils/parser.py
@@ -17,23 +17,25 @@ class Parser:
 
     def __init__(self):
         self.settings = ParserConfig(**settings_manager.get("parser"))
-        self.language = self.settings.language or ["English"]
-        self.resolution = ["1080p", "720p"]
-        self.unwanted_codec = ["H.263", "Xvid"]  # Bad for transcoding
-        self.quality = [None, "Blu-ray", "WEB-DL", "WEBRip", "HDRip",
-                        "HDTVRip", "BDRip", "Pay-Per-View Rip"]
-        self.validate_settings()
-
-    def validate_settings(self):
+        self.language = self.settings.language
+        self.resolution = self.determine_resolution()
+
+    def determine_resolution(self):
+        """Determine the resolution to use based on user settings."""
         if self.settings.highest_quality:
-            self.resolution = ["UHD", "2160p", "4K", "1080p", "720p"]
-        elif self.settings.include_4k:
-            self.resolution = ["2160p", "4K", "1080p", "720p"]
-        else:
-            self.resolution = ["1080p", "720p"]
+            return ["UHD", "2160p", "4K", "1080p", "720p"]
+        if self.settings.include_4k:
+            return ["2160p", "4K", "1080p", "720p"]
+        return ["1080p", "720p"]
"720p"] - else: - self.resolution = ["1080p", "720p"] + return ["UHD", "2160p", "4K", "1080p", "720p"] + if self.settings.include_4k: + return ["2160p", "4K", "1080p", "720p"] + return ["1080p", "720p"] + + def parse(self, item, string) -> dict: + """Parse the given string and return True if it matches the user settings.""" + return self._parse(item, string) - def _parse(self, string): + def _parse(self, item, string) -> dict: + """Parse the given string and return the parsed data.""" parse = PTN.parse(string) + parsed_title = parse.get("title", "") # episodes episodes = [] @@ -45,156 +47,178 @@ def _parse(self, string): else: episodes.append(int(episode)) - title = parse.get("title") - season = parse.get("season") - audio = parse.get("audio") - codec = parse.get("codec") - resolution = parse.get("resolution") - quality = parse.get("quality") - subtitles = parse.get("subtitles") - language = parse.get("language") - hdr = parse.get("hdr") - upscaled = parse.get("upscaled") - remastered = parse.get("remastered") - proper = parse.get("proper") - repack = parse.get("repack") - remux = parse.get("remux") - if not language: - language = "English" - extended = parse.get("extended") - - return { - "title": title, - "resolution": resolution or [], - "quality": quality or [], - "season": season, - "episodes": episodes or [], - "codec": codec or [], - "audio": audio or [], - "hdr": hdr or False, - "upscaled": upscaled or False, - "remastered": remastered or False, - "proper": proper or False, - "repack": repack or False, - "subtitles": True if subtitles == "Available" else False, - "language": language or [], - "remux": remux or False, - "extended": extended, + title_match = self.check_for_title_match(item, parsed_title) + is_4k = parse.get("resolution", False) in ["2160p", "4K", "UHD"] + is_complete = self._is_complete_series(string) + is_dual_audio = self._is_dual_audio(string) + _is_unwanted_quality = self._is_unwanted_quality(string) + + parsed_data = { + "string": string, + "parsed_title": parsed_title, + "title_match": title_match, + "fetch": False, + "is_4k": is_4k, + "is_dual_audio": is_dual_audio, + "is_complete": is_complete, + "is_unwanted_quality": _is_unwanted_quality, + "year": parse.get("year", False), + "resolution": parse.get("resolution", []), + "quality": parse.get("quality", []), + "season": parse.get("season", []), + "episodes": episodes, + "codec": parse.get("codec", []), + "audio": parse.get("audio", []), + "hdr": parse.get("hdr", False), + "upscaled": parse.get("upscaled", False), + "remastered": parse.get("remastered", False), + "proper": parse.get("proper", False), + "repack": parse.get("repack", False), + "subtitles": parse.get("subtitles") == "Available", + "language": parse.get("language", []), + "remux": parse.get("remux", False), + "extended": parse.get("extended", False) } + parsed_data["fetch"] = self._should_fetch(parsed_data) + return parsed_data + def episodes(self, string) -> List[int]: + """Return a list of episodes in the given string.""" parse = self._parse(string) return parse["episodes"] def episodes_in_season(self, season, string) -> List[int]: - parse = self._parse(string) + """Return a list of episodes in the given season.""" + parse = self._parse(string=string) if parse["season"] == season: return parse["episodes"] return [] - def _is_4k(self, string) -> bool: - """Check if content is `4k`.""" - if self.settings.include_4k: - parsed = self._parse(string) - return parsed.get("resolution", False) in ["2160p", "4K"] + def _should_fetch(self, parsed_data: 
dict) -> bool: + """Determine if the parsed content should be fetched.""" + # This is where we determine if the item should be fetched + # based on the user settings and predefined rules. + # Edit with caution. All have to match for the item to be fetched. + # item_language = self._get_item_language(item) + return ( + parsed_data["resolution"] in self.resolution and + # any(lang in parsed_data.get("language", item_language) for lang in self.language) and + not parsed_data["is_unwanted_quality"] + ) - def _is_highest_quality(self, string) -> bool: + def _is_highest_quality(self, parsed_data: dict) -> bool: """Check if content is `highest quality`.""" - if self.settings.highest_quality: - parsed = self._parse(string) - return any([ - parsed.get("hdr", False), - parsed.get("remux", False), - parsed.get("resolution", False) in ["UHD", "2160p", "4K"], - parsed.get("upscaled", False) - ]) - - def _is_repack_or_proper(self, string) -> bool: - """Check if content is `repack` or `proper`.""" - if self.settings.repack_proper: - parsed = self._parse(string) - return any([ - parsed.get("proper", False), - parsed.get("repack", False), - ]) + return any( + parsed.get("resolution") in ["UHD", "2160p", "4K"] or + parsed.get("hdr", False) or + parsed.get("remux", False) or + parsed.get("upscaled", False) + for parsed in parsed_data + ) def _is_dual_audio(self, string) -> bool: - """Check if content is `dual audio`.""" - parsed = self._parse(string) - return parsed.get("audio") == "Dual" or \ - re.search(r"((dual.audio)|(english|eng)\W+(dub|audio))", string, flags=re.IGNORECASE) is not None - - def _is_network(self, string) -> bool: - """Check if content is from a `network`.""" - parsed = self._parse(string) - network = ["Apple TV+", "Amazon Studios", "Netflix", - "Nickelodeon", "YouTube Premium", "Disney Plus", - "DisneyNOW", "HBO Max", "HBO", "Hulu Networks", - "DC Universe", "Adult Swim", "Comedy Central", - "Peacock", "AMC", "PBS", "Crunchyroll", - "Syndication", "Hallmark", "BBC", "VICE", - "MSNBC", "Crave"] # Will probably be used later in `Versions` - return (parsed.get("network", False)) in network + """Check if any content in parsed_data has dual audio.""" + dual_audio_patterns = [ + re.compile(r"\bmulti(?:ple)?[ .-]*(?:lang(?:uages?)?|audio|VF2)?\b", re.IGNORECASE), + re.compile(r"\btri(?:ple)?[ .-]*(?:audio|dub\w*)\b", re.IGNORECASE), + re.compile(r"\bdual[ .-]*(?:au?$|[aá]udio|line)\b", re.IGNORECASE), + re.compile(r"\bdual\b(?![ .-]*sub)", re.IGNORECASE), + re.compile(r"\b(?:audio|dub(?:bed)?)[ .-]*dual\b", re.IGNORECASE), + re.compile(r"\bengl?(?:sub[A-Z]*)?\b", re.IGNORECASE), + re.compile(r"\beng?sub[A-Z]*\b", re.IGNORECASE), + re.compile(r"\b(?:DUBBED|dublado|dubbing|DUBS?)\b", re.IGNORECASE), + ] + return any(pattern.search(string) for pattern in dual_audio_patterns) + + @staticmethod + def _is_complete_series(string) -> bool: + """Check if string is a `complete series`.""" + # Can be used on either movie or show item types + series_patterns = [ + re.compile(r"(?:\bthe\W)?(?:\bcomplete|collection|dvd)?\b[ .]?\bbox[ .-]?set\b", re.IGNORECASE), + re.compile(r"(?:\bthe\W)?(?:\bcomplete|collection|dvd)?\b[ .]?\bmini[ .-]?series\b", re.IGNORECASE), + re.compile(r"(?:\bthe\W)?(?:\bcomplete|full|all)\b.*\b(?:series|seasons|collection|episodes|set|pack|movies)\b", re.IGNORECASE), + re.compile(r"\b(?:series|seasons|movies?)\b.*\b(?:complete|collection)\b", re.IGNORECASE), + re.compile(r"(?:\bthe\W)?\bultimate\b[ .]\bcollection\b", re.IGNORECASE), + 
re.compile(r"\bcollection\b.*\b(?:set|pack|movies)\b", re.IGNORECASE), + re.compile(r"\bcollection\b", re.IGNORECASE), + re.compile(r"duology|trilogy|quadr[oi]logy|tetralogy|pentalogy|hexalogy|heptalogy|anthology|saga", re.IGNORECASE) + ] + return any(pattern.search(string) for pattern in series_patterns) + @staticmethod def _is_unwanted_quality(string) -> bool: - """Check if string has an `unwanted` quality.""" - patterns = [ - re.compile(r"(?:HD)?CAM(?:-?Rip)?", re.IGNORECASE), - re.compile(r"(?:HD)?TS|TELESYNC|PDVD|PreDVDRip", re.IGNORECASE), - re.compile(r"(?:HD)?TC|TELECINE", re.IGNORECASE), - re.compile(r"WEB[ -]?Cap", re.IGNORECASE), - re.compile(r"WP|WORKPRINT", re.IGNORECASE), - re.compile(r"(?:DVD)?SCR(?:EENER)?|BDSCR", re.IGNORECASE), - re.compile(r"DVD-?(?:Rip|Mux)", re.IGNORECASE), - re.compile(r"DVDR|DVD-Full|Full-rip", re.IGNORECASE), - re.compile(r"D?TVRip|DVBRip", re.IGNORECASE), - re.compile(r"VODR(?:ip)?", re.IGNORECASE) + """Check if string has an 'unwanted' quality. Default to False.""" + unwanted_patterns = [ + re.compile(r"\b(?:H[DQ][ .-]*)?CAM(?:H[DQ])?(?:[ .-]*Rip)?\b", re.IGNORECASE), + re.compile(r"\b(?:H[DQ][ .-]*)?S[ .-]*print\b", re.IGNORECASE), + re.compile(r"\b(?:HD[ .-]*)?T(?:ELE)?S(?:YNC)?(?:Rip)?\b", re.IGNORECASE), + re.compile(r"\b(?:HD[ .-]*)?T(?:ELE)?C(?:INE)?(?:Rip)?\b", re.IGNORECASE), + re.compile(r"\bP(?:re)?DVD(?:Rip)?\b", re.IGNORECASE), + re.compile(r"\b(?:DVD?|BD|BR)?[ .-]*Scr(?:eener)?\b", re.IGNORECASE), + re.compile(r"\bVHS\b", re.IGNORECASE), + re.compile(r"\bHD[ .-]*TV(?:Rip)?\b", re.IGNORECASE), + re.compile(r"\bDVB[ .-]*(?:Rip)?\b", re.IGNORECASE), + re.compile(r"\bSAT[ .-]*Rips?\b", re.IGNORECASE), + re.compile(r"\bTVRips?\b", re.IGNORECASE), + re.compile(r"\bR5\b", re.IGNORECASE), + re.compile(r"\b(DivX|XviD)\b", re.IGNORECASE), ] - return any(pattern.search(string) for pattern in patterns) - - def sort_streams(self, streams: dict) -> dict: - """Sorts streams based on user preferences.""" - def sorting_key(item): - _, stream = item - title = stream['name'] - return ( - self._is_dual_audio(title), - self._is_repack_or_proper(title), - self._is_highest_quality(title), - self._is_4k(title), - self._is_network(title) - ) - sorted_streams = sorted(streams.items(), key=sorting_key, reverse=True) - return dict(sorted_streams) - - def parse(self, string) -> bool: - """Parse the given string and return True if it matches the user settings.""" - parse = self._parse(string) - return ( - parse["resolution"] in self.resolution - and parse["language"] in self.language - and not parse["quality"] in self.unwanted_quality - and not self._is_unwanted_quality(string) - ) - - def get_title(self, string) -> str: - """Get the `title` from the given string.""" - parse = self._parse(string) - return parse["title"] + return any(pattern.search(string) for pattern in unwanted_patterns) - def check_for_title_match(self, item, string, threshold=94) -> bool: + def check_for_title_match(self, item, parsed_title, threshold=90) -> bool: """Check if the title matches PTN title using fuzzy matching.""" - # TODO1: remove special chars from parsed_title and target_title. Could improve matching. - # TODO2: We should be checking aliases as well for titles. Anime only probably? 
diff --git a/backend/utils/request.py b/backend/utils/request.py index 6e7535b6..1fe29e49 100644 --- a/backend/utils/request.py +++ b/backend/utils/request.py @@ -32,10 +32,13 @@ def __init__(self, response: requests.Response, response_type=SimpleNamespace): def handle_response(self, response: requests.Response): """Handle different types of responses""" - if not self.is_ok: + if not self.is_ok and self.status_code not in [429, 520, 522]: logger.warning("Error: %s %s", response.status_code, response.content) + if self.status_code in [520, 522]: + # Cloudflare error from Torrentio + raise requests.exceptions.ConnectTimeout(response.content) if self.status_code not in [200, 201, 204]: - if self.status_code == 429: + if self.status_code in [429]: raise requests.exceptions.RequestException(response.content) return {} if len(response.content) > 0:
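The handle_response change above deliberately maps Cloudflare 520/522 to ConnectTimeout while 429 stays a generic RequestException. A hypothetical caller, not part of this PR, could use that split to retry soft Cloudflare failures and bail out on rate limits; the function name and retry/backoff values below are illustrative only:

```python
import time

import requests


def scrape_with_retry(fetch, retries: int = 3, backoff: int = 5):
    """Retry `fetch` (any zero-argument callable that performs the request)
    when the response handler signals a Cloudflare 520/522."""
    for attempt in range(retries):
        try:
            return fetch()
        except requests.exceptions.ConnectTimeout:
            # Cloudflare 520/522 from Torrentio: transient, so back off and retry.
            time.sleep(backoff * (attempt + 1))
        except requests.exceptions.RequestException:
            # 429 or another hard failure: give up and let the next cycle retry.
            # ConnectTimeout subclasses RequestException, so it must be caught first.
            return None
    return None
```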
diff --git a/frontend/package.json b/frontend/package.json index 300cf1b0..44db7ea0 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -42,14 +42,15 @@ "bits-ui": "^0.15.1", "clsx": "^2.0.0", "cmdk-sv": "^0.0.12", + "embla-carousel-svelte": "8.0.0-rc20", "formsnap": "^0.4.2", "lucide-svelte": "^0.314.0", "luxon": "^3.4.4", "mode-watcher": "^0.1.2", "motion": "^10.17.0", "nprogress": "^0.2.0", - "svelte-sonner": "^0.3.6", - "sveltekit-superforms": "^1.13.1", + "svelte-sonner": "^0.3.11", + "sveltekit-superforms": "^1.13.4", "tailwind-merge": "^2.2.0", "tailwind-variants": "^0.1.18", "uuid": "^9.0.1",
diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index 1ade38ad..00ad565b 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -14,6 +14,9 @@ dependencies: cmdk-sv: specifier: ^0.0.12 version: 0.0.12(svelte@4.2.9) + embla-carousel-svelte: + specifier: 8.0.0-rc20 + version: 8.0.0-rc20(svelte@4.2.9) formsnap: specifier: ^0.4.2 version: 0.4.2(svelte@4.2.9)(sveltekit-superforms@1.13.4)(zod@3.22.4) @@ -33,10 +36,10 @@ dependencies: specifier: ^0.2.0 version: 0.2.0 svelte-sonner: - specifier: ^0.3.6 + specifier: ^0.3.11 version: 0.3.11(svelte@4.2.9) sveltekit-superforms: - specifier: ^1.13.1 + specifier: ^1.13.4 version: 1.13.4(@sveltejs/kit@2.4.3)(svelte@4.2.9)(zod@3.22.4) tailwind-merge: specifier: ^2.2.0 @@ -1368,6 +1371,28 @@ packages: resolution: {integrity: sha512-M4+u22ZJGpk4RY7tne6W+APkZhnnhmAH48FNl8iEFK2lEgob+U5rUQsIqQhvAwCXYpfd3H20pHK/ENsCvwTbsA==} dev: true + /embla-carousel-reactive-utils@8.0.0-rc20(embla-carousel@8.0.0-rc20): + resolution: {integrity: sha512-fE7IeSS8HqwDnTDMP8eo0i4pcYQAemmJq53zCLXnp3Yj/p5+IpB1nC7aKQjd2ug1dGOSwwNRFaPI3shlAVVW/A==} + peerDependencies: + embla-carousel: 8.0.0-rc20 + dependencies: + embla-carousel: 8.0.0-rc20 + dev: false + + /embla-carousel-svelte@8.0.0-rc20(svelte@4.2.9): + resolution: {integrity: sha512-MpON0Pw1EcYMjJt1VCnDk+HXTQrNwyHTlhdQ/WFx5QrXOpqvSup1nXKiLYsjxKkwBv5vYU9e04akNdqEJQ3iIg==} + peerDependencies: + svelte: ^3.49.0 || ^4.0.0 + dependencies: + embla-carousel: 8.0.0-rc20 + embla-carousel-reactive-utils: 8.0.0-rc20(embla-carousel@8.0.0-rc20) + svelte: 4.2.9 + dev: false + + /embla-carousel@8.0.0-rc20: + resolution: {integrity: sha512-fhzhbIAcsjSpUsg5jWsg0+zVyJhY5x2SPXtuS4MPAWQWoVQpvkcbX9r0FvPBn6emTbgNFRtAcWczstJy2msdUw==} + dev: false + /emoji-regex@8.0.0: resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==}
diff --git a/frontend/src/app.html b/frontend/src/app.html index bd30208f..1d87e0e6 100644 --- a/frontend/src/app.html +++ b/frontend/src/app.html @@ -23,14 +23,14 @@ [the hunk's head markup did not survive extraction]
[A diff for an item-card Svelte component follows, but its file header and markup were stripped during extraction. The surviving script lines, all removals: - import type { IcebergItem, StatusInterface } from '$lib/types'; - import { formatWords, formatDate } from '$lib/helpers'; - import { Badge } from '$lib/components/ui/badge'; - export let plexDebridItem: IcebergItem; - export let itemState: StatusInterface; - let fallback = 'https://via.placeholder.com/198x228.png?text=No+thumbnail'; - let poster = `https://images.metahub.space/poster/small/${plexDebridItem.imdb_id}/img`; - let banner = `https://images.metahub.space/background/medium/${plexDebridItem.imdb_id}/img`; - // TODO: Use item ID to show more data - // TODO: Make use of type. Per the remaining fragments, the removed markup rendered the poster with an on:error fallback to the placeholder image, the item title, an "Aired" date via formatDate, genre badges via formatWords, the status text from itemState (falling back to formatWords(plexDebridItem.state)), the requested date, and plexDebridItem.requested_by. The exact markup is not recoverable from this extract.]
[The remaining hunks touch shadcn-svelte UI components under frontend/src/lib/components/ui/, but their markup did not survive extraction. Surviving headers: accordion/accordion-item.svelte (index d1c54298..9d837d71, @@ -9,10 +9,6 @@), alert-dialog/alert-dialog-cancel.svelte (index 8b3aa0ff..5aee9a6a, @@ -11,11 +11,7 @@), alert-dialog/alert-dialog-header.svelte (index b2d63dbe..b83aaadf, @@ -8,9 +8,6 @@), alert-dialog/alert-dialog-overlay.svelte (index 2d02c49c..2c43babe, @@ -16,9 ... @@), alert-dialog/alert-dialog-title.svelte (index 0b57ce7a..4800f0b5, @@ -9,10 +9,6 @@), and alert/alert.svelte (index ef6b407c..8c6790fd, @@ -12,10 +12,6 @@). Each hunk sits around the component's `export { className as class };` script and nets out three or four lines shorter; the exact markup changes are not recoverable from this extract.]