From 4b46c3828150d1f39d3e98037c01838188e3f9fc Mon Sep 17 00:00:00 2001
From: Spoked
Date: Mon, 22 Jan 2024 08:00:28 -0500
Subject: [PATCH 01/65] rework in progress.

---
 backend/program/content/listrr.py         |  15 +-
 backend/program/content/overseerr.py      |   2 -
 backend/program/content/plex_watchlist.py |  11 +-
 backend/program/media/item.py             |   5 +-
 backend/program/plex.py                   |  21 +-
 backend/program/scrapers/__init__.py      |   4 +-
 backend/program/scrapers/jackett.py       |  32 +--
 backend/program/scrapers/orionoid.py      |  11 +-
 backend/program/scrapers/torrentio.py     |  29 ++-
 backend/program/updaters/trakt.py         |   9 +-
 backend/utils/parser.py                   | 282 +++++++++++-----------
 backend/utils/request.py                  |   4 +-
 12 files changed, 235 insertions(+), 190 deletions(-)

diff --git a/backend/program/content/listrr.py b/backend/program/content/listrr.py
index b60b269a..fba7f8b6 100644
--- a/backend/program/content/listrr.py
+++ b/backend/program/content/listrr.py
@@ -35,17 +35,25 @@ def __init__(self, media_items: MediaItemContainer):
         logger.info("Listrr initialized!")
 
     def validate_settings(self) -> bool:
+        """Validate Listrr settings."""
         if not self.settings.enabled:
             logger.debug("Listrr is set to disabled.")
             return False
         if self.settings.api_key == "" or len(self.settings.api_key) != 64:
-            logger.error("Listrr api key is not set.")
+            logger.error("Listrr api key is not set or invalid.")
             return False
+        for list_name, content_list in [('movie_lists', self.settings.movie_lists),
+                                        ('show_lists', self.settings.show_lists)]:
+            if content_list is not None:
+                for item in content_list:
+                    if len(item) != 24:
+                        logger.error(f"{list_name} contains an item with invalid length: {item}")
+                        return False
         try:
             response = ping("https://listrr.pro/", additional_headers=self.headers)
             return response.ok
-        except Exception:
-            logger.error("Listrr url is not reachable.")
+        except Exception as e:
+            logger.error("Listrr url is not reachable: %s", e)
             return False
 
     def run(self):
@@ -56,7 +64,8 @@ def run(self):
         movie_items = self._get_items_from_Listrr("Movies", self.settings.movie_lists)
         show_items = self._get_items_from_Listrr("Shows", self.settings.show_lists)
         items = list(set(movie_items + show_items))
-        container = self.updater.create_items(items)
+        new_items = [item for item in items if item not in self.media_items]
+        container = self.updater.create_items(new_items)
         for item in container:
             item.set("requested_by", "Listrr")
         added_items = self.media_items.extend(container)

diff --git a/backend/program/content/overseerr.py b/backend/program/content/overseerr.py
index a33c00e9..a81bb4a2 100644
--- a/backend/program/content/overseerr.py
+++ b/backend/program/content/overseerr.py
@@ -65,7 +65,6 @@ def run(self):
 
     def _get_items_from_overseerr(self, amount: int):
         """Fetch media from overseerr"""
-
         response = get(
             self.settings.url + f"/api/v1/request?take={amount}",
             additional_headers=self.headers,
@@ -79,7 +78,6 @@ def _get_items_from_overseerr(self, amount: int):
                 ids.append(imdb_id)
             else:
                 ids.append(item.media.imdbId)
-
         return ids
 
     def get_imdb_id(self, overseerr_item):

diff --git a/backend/program/content/plex_watchlist.py b/backend/program/content/plex_watchlist.py
index 9abcbf67..1056e8bf 100644
--- a/backend/program/content/plex_watchlist.py
+++ b/backend/program/content/plex_watchlist.py
@@ -1,7 +1,7 @@
 """Plex Watchlist Module"""
 from typing import Optional
 from pydantic import BaseModel
-from requests import ConnectTimeout
+from requests import ConnectTimeout, HTTPError
 from utils.request import get, ping
 from utils.logger import logger
 from utils.settings import settings_manager
@@ -36,13 +36,20 @@ def validate_settings(self):
             return False
         if self.settings.rss:
             try:
-                response = ping(self.settings.rss, timeout=15)
+                response = ping(self.settings.rss)
                 if response.ok:
                     self.rss_enabled = True
                     return True
                 else:
                     logger.warn(f"Plex RSS URL is not reachable. Falling back to normal Watchlist.")
                     return True
+            except HTTPError as e:
+                if e.response.status_code in [404]:
+                    logger.error("Plex RSS URL is invalid. Falling back to normal Watchlist.")
+                    return True
+                if e.response.status_code >= 400 and e.response.status_code <= 500:
+                    logger.warn(f"Plex RSS URL is not reachable. Falling back to normal Watchlist.")
+                    return True
             except Exception:
                 return False
         return True

diff --git a/backend/program/media/item.py b/backend/program/media/item.py
index 71f845ac..a715fa49 100644
--- a/backend/program/media/item.py
+++ b/backend/program/media/item.py
@@ -27,6 +27,8 @@ def __init__(self, item):
         self.requested_by = item.get("requested_by", None)
         self.file = None
         self.folder = None
+        self.parsed = False
+        self.parsed_data = item.get("parsed_data", [])
 
         # Media related
         self.title = item.get("title", None)
@@ -236,7 +238,8 @@ def __init__(self, item):
         super().__init__(item)
 
     def __eq__(self, other):
-        return self.number == other.number
+        if type(self) == type(other) and self.parent == other.parent:
+            return self.number == other.number
 
     def __repr__(self):
         return f"Episode:{self.number}:{self.state.__class__.__name__}"

diff --git a/backend/program/plex.py b/backend/program/plex.py
index 809d3aba..aac5f72f 100644
--- a/backend/program/plex.py
+++ b/backend/program/plex.py
@@ -8,6 +8,7 @@
 from typing import Optional
 from plexapi.server import PlexServer
 from pydantic import BaseModel
+from program.updaters.trakt import get_imdbid_from_tvdb
 from utils.logger import logger
 from utils.settings import settings_manager as settings
 from program.media.container import MediaItemContainer
@@ -220,16 +221,16 @@ def _map_item_from_data(item):
     # This is due to season 0 (specials) not having imdb ids.
     # Attempt to get the imdb id from the tvdb id if we don't have it.
     # Needs more testing..
-    # if not imdb_id:
-    #     logger.debug("Unable to find imdb, trying tvdb for %s", title)
-    #     tvdb_id = next(
-    #         (guid.id.split("://")[-1] for guid in guids if "tvdb" in guid.id), None
-    #     )
-    #     if tvdb_id:
-    #         logger.debug("Unable to find imdb, but found tvdb: %s", tvdb_id)
-    #         imdb_id = get_imdbid_from_tvdb(tvdb_id)
-    #         if imdb_id:
-    #             logger.debug("Found imdb from tvdb: %s", imdb_id)
+    if not imdb_id:
+        logger.debug("Unable to find imdb, trying tvdb for %s", title)
+        tvdb_id = next(
+            (guid.id.split("://")[-1] for guid in guids if "tvdb" in guid.id), None
+        )
+        if tvdb_id:
+            logger.debug("Unable to find imdb, but found tvdb: %s", tvdb_id)
+            imdb_id = get_imdbid_from_tvdb(tvdb_id)
+            if imdb_id:
+                logger.debug("Found imdb from tvdb: %s", imdb_id)
 
     media_item_data = {
         "title": title,

diff --git a/backend/program/scrapers/__init__.py b/backend/program/scrapers/__init__.py
index ae87ac97..e7a302e4 100644
--- a/backend/program/scrapers/__init__.py
+++ b/backend/program/scrapers/__init__.py
@@ -2,8 +2,8 @@
 from pydantic import BaseModel
 from utils.service_manager import ServiceManager
 from utils.settings import settings_manager as settings
+# from utils.parser import parser, sort_streams
 from utils.logger import logger
-from utils.parser import parser
 from .torrentio import Torrentio
 from .orionoid import Orionoid
 from .jackett import Jackett
@@ -34,6 +34,8 @@ def run(self, item) -> None:
             service.run(item)
         item.set("scraped_at", datetime.now())
         item.set("scraped_times", item.scraped_times + 1)
+        # sorted_streams = sort_streams(item.streams, parser)
+        # item.set("streams", sorted_streams)
 
     def _can_we_scrape(self, item) -> bool:
         return self._is_released(item) and self._needs_new_scrape(item)

diff --git a/backend/program/scrapers/jackett.py b/backend/program/scrapers/jackett.py
index 90063eb5..f86ce2eb 100644
--- a/backend/program/scrapers/jackett.py
+++ b/backend/program/scrapers/jackett.py
@@ -21,7 +21,7 @@ def __init__(self, _):
         self.api_key = None
         self.settings = JackettConfig(**settings_manager.get(f"scraping.{self.key}"))
         self.initialized = self.validate_settings()
-        if not self.initialized or not self.api_key:
+        if not self.initialized and not self.api_key:
             return
         self.minute_limiter = RateLimiter(max_calls=60, period=60, raise_on_limit=True)
         self.second_limiter = RateLimiter(max_calls=1, period=3)
@@ -72,25 +72,29 @@ def api_scrape(self, item):
         with self.minute_limiter:
             query = ""
             if item.type == "movie":
-                query = f"&cat=2010,2020,2030,2040,2045,2050,2080&t=movie&q={item.title} {item.aired_at.year}"
+                query = f"&cat=2000,2010,2020,2030,2040,2045,2050,2080&t=movie&q={item.title}&year={item.aired_at.year}"
             if item.type == "season":
-                query = f"&cat=5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.title}&season={item.number}"
+                query = f"&cat=5000,5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.title}&season={item.number}"
             if item.type == "episode":
-                query = f"&cat=5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.parent.title}&season={item.parent.number}&ep={item.number}"
+                query = f"&cat=5000,5010,5020,5030,5040,5045,5050,5060,5070,5080&t=tvsearch&q={item.parent.parent.title}&season={item.parent.number}&ep={item.number}"
 
             url = (f"{self.settings.url}/api/v2.0/indexers/!status:failing,test:passed/results/torznab?apikey={self.api_key}{query}")
             with self.second_limiter:
                 response = get(url=url, retry_if_failed=False, timeout=60)
+
             if response.is_ok:
                 data = {}
+                parsed_data_list = []
-                for stream in response.data['rss']['channel'].get('item', []):
-                    title = stream.get('title')
-                    if parser.check_for_title_match(item, title):
-                        if parser.parse(title):
-                            attr = stream.get('torznab:attr', [])
-                            infohash_attr = next((a for a in attr if a.get('@name') == 'infohash'), None)
-                            if infohash_attr:
-                                infohash = infohash_attr.get('@value')
-                                data[infohash] = {"name": title}
+                for stream in response.data["rss"]["channel"].get("item", []):
+                    title = stream.get("title")
+                    parsed_data = parser.parse(item, title)
+                    if parsed_data.get("fetch", True) and parsed_data.get("title_match", False):
+                        attr = stream.get("torznab:attr", [])
+                        infohash_attr = next((a for a in attr if a.get("@name") == "infohash"), None)
+                        if infohash_attr:
+                            infohash = infohash_attr.get("@value")
+                            data[infohash] = {"name": title}
+                        parsed_data_list.append(parsed_data)
                 if len(data) > 0:
-                    return parser.sort_streams(data)
+                    item.parsed_data = parsed_data_list
+                    return data
         return {}

diff --git a/backend/program/scrapers/orionoid.py b/backend/program/scrapers/orionoid.py
index 98d0d02b..a56aa408 100644
--- a/backend/program/scrapers/orionoid.py
+++ b/backend/program/scrapers/orionoid.py
@@ -124,12 +124,17 @@ def api_scrape(self, item):
 
         with self.second_limiter:
             response = get(url, retry_if_failed=False, timeout=60)
-        if response.is_ok and response.data.result.status != "error":
+
+        parsed_data_list = []
+        if response.is_ok and len(response.data.data.streams) > 0:
             data = {}
             for stream in response.data.data.streams:
                 title = stream.file.name
-                if parser.parse(title) and stream.file.hash:
+                parsed_data = parser.parse(item, title)
+                if parsed_data["fetch"] and stream.file.hash:
                     data[stream.file.hash] = {"name": title}
+                    parsed_data_list.append(parsed_data)
             if len(data) > 0:
-                return parser.sort_streams(data)
+                item.parsed_data = parsed_data_list
+                return data
         return {}
\ No newline at end of file

diff --git a/backend/program/scrapers/torrentio.py b/backend/program/scrapers/torrentio.py
index 97195852..b4614b2a 100644
--- a/backend/program/scrapers/torrentio.py
+++ b/backend/program/scrapers/torrentio.py
@@ -1,6 +1,7 @@
 """ Torrentio scraper module """
 from typing import Optional
 from pydantic import BaseModel
+from requests import HTTPError, ReadTimeout
 from requests.exceptions import RequestException
 from utils.logger import logger
 from utils.request import RateLimitExceeded, get, RateLimiter
@@ -20,7 +21,7 @@ def __init__(self, _):
         self.key = "torrentio"
         self.settings = TorrentioConfig(**settings_manager.get(f"scraping.{self.key}"))
         self.minute_limiter = RateLimiter(max_calls=60, period=60, raise_on_limit=True)
-        self.second_limiter = RateLimiter(max_calls=1, period=3)
+        self.second_limiter = RateLimiter(max_calls=1, period=5)
         self.initialized = self.validate_settings()
         if not self.initialized:
             return
@@ -43,10 +44,11 @@ def run(self, item) -> None:
             return
         except RateLimitExceeded:
             self.minute_limiter.limit_hit()
+            logger.debug("Torrentio rate limit hit for item: %s", item.log_string)
             return
 
     def _scrape_item(self, item):
-        data = self.api_scrape(item)
+        data = self.api_scrape(item)  # Unpack the tuple to get data and stream_count
         if len(data) > 0:
             item.streams.update(data)
             logger.debug("Found %s streams for %s", len(data), item.log_string)
@@ -74,19 +76,20 @@ def api_scrape(self, item):
                 + f"/stream/{scrape_type}/{imdb_id}"
             )
             if identifier:
-                url += f"{identifier}"
+                url += identifier
             with self.second_limiter:
-                response = get(f"{url}.json", retry_if_failed=False, timeout=30)
-            if response.is_ok:
+                response = get(f"{url}.json", retry_if_failed=False, timeout=60)
+
+            parsed_data_list = []
+            if response.is_ok and len(response.data.streams) > 0:
                 data = {}
-                if len(response.data.streams) == 0:
-                    return data
                 for stream in response.data.streams:
-                    title = stream.title.split("\n👤")[0]
-                    if parser.parse(title):
-                        data[stream.infoHash] = {
-                            "name": title,
-                        }
+                    torrent = stream.title.split("\n👤")[0]
+                    parsed_data = parser.parse(item, torrent)
+                    if parsed_data.get("fetch", False):
+                        data[stream.infoHash] = {"name": torrent}
+                        parsed_data_list.append(parsed_data)
                 if len(data) > 0:
-                    return parser.sort_streams(data)
+                    item.parsed_data = parsed_data_list
+                    return data
         return {}

diff --git a/backend/program/updaters/trakt.py b/backend/program/updaters/trakt.py
index 10a61b2d..720b2538 100644
--- a/backend/program/updaters/trakt.py
+++ b/backend/program/updaters/trakt.py
@@ -142,8 +142,15 @@ def create_item_from_imdb_id(imdb_id: str):
     media_type = response.data[0].type
     if media_type == "movie":
         data = response.data[0].movie
-    else:
+    elif media_type == "show":
         data = response.data[0].show
+    elif media_type == "season":
+        data = response.data[0].season
+    elif media_type == "episode":
+        data = response.data[0].episode
+    else:
+        logger.debug("Unknown item %s with type %s", imdb_id, media_type)
+        return None
     if data:
         return _map_item_from_data(data, media_type)
     return None

diff --git a/backend/utils/parser.py b/backend/utils/parser.py
index 0e48331d..0bbae0e1 100644
--- a/backend/utils/parser.py
+++ b/backend/utils/parser.py
@@ -17,23 +17,25 @@ class Parser:
 
     def __init__(self):
         self.settings = ParserConfig(**settings_manager.get("parser"))
-        self.language = self.settings.language or ["English"]
-        self.resolution = ["1080p", "720p"]
-        self.unwanted_codec = ["H.263", "Xvid"]  # Bad for transcoding
-        self.quality = [None, "Blu-ray", "WEB-DL", "WEBRip", "HDRip",
-                        "HDTVRip", "BDRip", "Pay-Per-View Rip"]
-        self.validate_settings()
-
-    def validate_settings(self):
+        self.language = self.settings.language
+        self.resolution = self.determine_resolution()
+
+    def determine_resolution(self):
+        """Determine the resolution to use based on user settings."""
         if self.settings.highest_quality:
-            self.resolution = ["UHD", "2160p", "4K", "1080p", "720p"]
-        elif self.settings.include_4k:
-            self.resolution = ["2160p", "4K", "1080p", "720p"]
-        else:
-            self.resolution = ["1080p", "720p"]
+            return ["UHD", "2160p", "4K", "1080p", "720p"]
+        if self.settings.include_4k:
+            return ["2160p", "4K", "1080p", "720p"]
+        return ["1080p", "720p"]
+
+    def parse(self, item, string) -> dict:
+        """Parse the given string and return True if it matches the user settings."""
+        return self._parse(item, string)
 
-    def _parse(self, string):
+    def _parse(self, item, string) -> dict:
+        """Parse the given string and return the parsed data."""
         parse = PTN.parse(string)
+        parsed_title = parse.get("title", "")
 
         # episodes
         episodes = []
@@ -45,156 +47,160 @@ def _parse(self, string):
         else:
             episodes.append(int(episode))
 
-        title = parse.get("title")
-        season = parse.get("season")
-        audio = parse.get("audio")
-        codec = parse.get("codec")
-        resolution = parse.get("resolution")
-        quality = parse.get("quality")
-        subtitles = parse.get("subtitles")
-        language = parse.get("language")
-        hdr = parse.get("hdr")
-        upscaled = parse.get("upscaled")
-        remastered = parse.get("remastered")
-        proper = parse.get("proper")
-        repack = parse.get("repack")
-        remux = parse.get("remux")
-        if not language:
-            language = "English"
-        extended = parse.get("extended")
-
-        return {
-            "title": title,
-            "resolution": resolution or [],
-            "quality": quality or [],
-            "season": season,
-            "episodes": episodes or [],
-            "codec": codec or [],
-            "audio": audio or [],
-            "hdr": hdr or False,
-            "upscaled": upscaled or False,
-            "remastered": remastered or False,
-            "proper": proper or False,
-            "repack": repack or False,
-            "subtitles": True if subtitles == "Available" else False,
-            "language": language or [],
-            "remux": remux or False,
-            "extended": extended,
+        title_match = self.check_for_title_match(item, string)
+        is_4k = parse.get("resolution", False) in ["2160p", "4K", "UHD"]
+        is_complete = self._is_complete_series(string)
+        is_dual_audio = self._is_dual_audio(item, string)
+
+        parsed_data = {
+            "string": string,
+            "parsed_title": parsed_title,
+            "title_match": title_match,
+            "is_4k": is_4k,
+            "is_dual_audio": is_dual_audio,
+            "is_complete": is_complete,
+            "_is_unwanted_quality": self._is_unwanted_quality(string),
+            "year": parse.get("year", False),
+            "resolution": parse.get("resolution", []),
+            "quality": parse.get("quality", []),
+            "season": parse.get("season", []),
+            "episodes": episodes,
+            "codec": parse.get("codec", []),
+            "audio": parse.get("audio", []),
+            "hdr": parse.get("hdr", False),
+            "upscaled": parse.get("upscaled", False),
+            "remastered": parse.get("remastered", False),
+            "proper": parse.get("proper", False),
+            "repack": parse.get("repack", False),
+            "subtitles": parse.get("subtitles") == "Available",
+            "language": parse.get("language", []),
+            "remux": parse.get("remux", False),
+            "extended": parse.get("extended", [])
         }
 
+        parsed_data["fetch"] = self._should_fetch(parsed_data)
+        return parsed_data
+
     def episodes(self, string) -> List[int]:
+        """Return a list of episodes in the given string."""
         parse = self._parse(string)
         return parse["episodes"]
 
     def episodes_in_season(self, season, string) -> List[int]:
+        """Return a list of episodes in the given season."""
         parse = self._parse(string)
         if parse["season"] == season:
             return parse["episodes"]
         return []
 
-    def _is_4k(self, string) -> bool:
-        """Check if content is `4k`."""
-        if self.settings.include_4k:
-            parsed = self._parse(string)
-            return parsed.get("resolution", False) in ["2160p", "4K"]
+    def _should_fetch(self, parsed_data) -> bool:
+        """Determine if the parsed content should be fetched."""
+        # This is where we determine if the item should be fetched
+        # based on the user settings and predefined rules.
+        # Edit with caution. All have to match for the item to be fetched.
+        return (
+            parsed_data["resolution"] in self.resolution and
+            any(lang in parsed_data.get("language", []) for lang in self.language) and
+            not parsed_data["is_unwanted"]
+        )
 
-    def _is_highest_quality(self, string) -> bool:
+    def _is_highest_quality(self, item) -> bool:
         """Check if content is `highest quality`."""
-        if self.settings.highest_quality:
-            parsed = self._parse(string)
-            return any([
-                parsed.get("hdr", False),
-                parsed.get("remux", False),
-                parsed.get("resolution", False) in ["UHD", "2160p", "4K"],
-                parsed.get("upscaled", False)
-            ])
+        return any([
+            item.parsed_data.get("hdr", False),
+            item.parsed_data.get("remux", False),
+            item.parsed_data.get("resolution", False) in ["UHD", "2160p", "4K"],
+            item.parsed_data.get("upscaled", False)
+        ])
 
-    def _is_repack_or_proper(self, string) -> bool:
+    def _is_repack_or_proper(self, item) -> bool:
         """Check if content is `repack` or `proper`."""
-        if self.settings.repack_proper:
-            parsed = self._parse(string)
-            return any([
-                parsed.get("proper", False),
-                parsed.get("repack", False),
-            ])
+        return any([
+            item.parsed_data.get("proper", False),
+            item.parsed_data.get("repack", False)
+        ])
 
-    def _is_dual_audio(self, string) -> bool:
-        """Check if content is `dual audio`."""
-        parsed = self._parse(string)
-        return parsed.get("audio") == "Dual" or \
-            re.search(r"((dual.audio)|(english|eng)\W+(dub|audio))", string, flags=re.IGNORECASE) is not None
-
-    def _is_network(self, string) -> bool:
-        """Check if content is from a `network`."""
-        parsed = self._parse(string)
-        network = ["Apple TV+", "Amazon Studios", "Netflix",
-                   "Nickelodeon", "YouTube Premium", "Disney Plus",
-                   "DisneyNOW", "HBO Max", "HBO", "Hulu Networks",
-                   "DC Universe", "Adult Swim", "Comedy Central",
-                   "Peacock", "AMC", "PBS", "Crunchyroll",
-                   "Syndication", "Hallmark", "BBC", "VICE",
-                   "MSNBC", "Crave"]  # Will probably be used later in `Versions`
-        return (parsed.get("network", False)) in network
-
-    def _is_unwanted_quality(string) -> bool:
-        """Check if string has an `unwanted` quality."""
+    def _is_dual_audio(self, item, string) -> bool:
+        """Check if content is `dual audio`."""
+        if item.parsed_data.get("audio", False) == "Dual":
+            return True
         patterns = [
-            re.compile(r"(?:HD)?CAM(?:-?Rip)?", re.IGNORECASE),
-            re.compile(r"(?:HD)?TS|TELESYNC|PDVD|PreDVDRip", re.IGNORECASE),
-            re.compile(r"(?:HD)?TC|TELECINE", re.IGNORECASE),
-            re.compile(r"WEB[ -]?Cap", re.IGNORECASE),
-            re.compile(r"WP|WORKPRINT", re.IGNORECASE),
-            re.compile(r"(?:DVD)?SCR(?:EENER)?|BDSCR", re.IGNORECASE),
-            re.compile(r"DVD-?(?:Rip|Mux)", re.IGNORECASE),
-            re.compile(r"DVDR|DVD-Full|Full-rip", re.IGNORECASE),
-            re.compile(r"D?TVRip|DVBRip", re.IGNORECASE),
-            re.compile(r"VODR(?:ip)?", re.IGNORECASE)
+            re.compile(r"\bmulti(?:ple)?[ .-]*(?:lang(?:uages?)?|audio|VF2)?\b", re.IGNORECASE),
+            re.compile(r"\btri(?:ple)?[ .-]*(?:audio|dub\w*)\b", re.IGNORECASE),
+            re.compile(r"\bdual[ .-]*(?:au?$|[aá]udio|line)\b", re.IGNORECASE),
+            re.compile(r"\b(?:audio|dub(?:bed)?)[ .-]*dual\b", re.IGNORECASE),
+            re.compile(r"\b(?:DUBBED|dublado|dubbing|DUBS?)\b", re.IGNORECASE),
         ]
         return any(pattern.search(string) for pattern in patterns)
 
-    def sort_streams(self, streams: dict) -> dict:
-        """Sorts streams based on user preferences."""
-        def sorting_key(item):
-            _, stream = item
-            title = stream['name']
-            return (
-                self._is_dual_audio(title),
-                self._is_repack_or_proper(title),
-                self._is_highest_quality(title),
-                self._is_4k(title),
-                self._is_network(title)
-            )
-        sorted_streams = sorted(streams.items(), key=sorting_key, reverse=True)
-        return dict(sorted_streams)
-
-    def parse(self, string) -> bool:
-        """Parse the given string and return True if it matches the user settings."""
-        parse = self._parse(string)
-        return (
-            parse["resolution"] in self.resolution
-            and parse["language"] in self.language
-            and not parse["quality"] in self.unwanted_quality
-            and not self._is_unwanted_quality(string)
-        )
+    @staticmethod
+    def _is_complete_series(string) -> bool:
+        """Check if string is a `complete series`."""
+        # Can be used on either movie or show item type
+        patterns = [
+            re.compile(r"(?:\bthe\W)?(?:\bcomplete|collection|dvd)?\b[ .]?\bbox[ .-]?set\b", re.IGNORECASE),
+            re.compile(r"(?:\bthe\W)?(?:\bcomplete|collection|dvd)?\b[ .]?\bmini[ .-]?series\b", re.IGNORECASE),
+            re.compile(r"(?:\bthe\W)?(?:\bcomplete|full|all)\b.*\b(?:series|seasons|collection|episodes|set|pack|movies)\b", re.IGNORECASE),
+            re.compile(r"\b(?:series|seasons|movies?)\b.*\b(?:complete|collection)\b", re.IGNORECASE),
+            re.compile(r"(?:\bthe\W)?\bultimate\b[ .]\bcollection\b", re.IGNORECASE),
+            re.compile(r"\bcollection\b.*\b(?:set|pack|movies)\b", re.IGNORECASE),
+            re.compile(r"\bcollection\b", re.IGNORECASE),
+            re.compile(r"duology|trilogy|quadr[oi]logy|tetralogy|pentalogy|hexalogy|heptalogy|anthology|saga", re.IGNORECASE)
+        ]
+        return any(pattern.search(string) for pattern in patterns)
 
-    def get_title(self, string) -> str:
-        """Get the `title` from the given string."""
-        parse = self._parse(string)
-        return parse["title"]
+    @staticmethod
+    def _is_unwanted_quality(string) -> bool:
+        """Check if string has an `unwanted` quality."""
+        patterns = [
+            re.compile(r"\b(?:H[DQ][ .-]*)?CAM(?:H[DQ])?(?:[ .-]*Rip)?\b", re.IGNORECASE),
+            re.compile(r"\b(?:H[DQ][ .-]*)?S[ .-]*print\b", re.IGNORECASE),
+            re.compile(r"\b(?:HD[ .-]*)?T(?:ELE)?S(?:YNC)?(?:Rip)?\b", re.IGNORECASE),
+            re.compile(r"\b(?:HD[ .-]*)?T(?:ELE)?C(?:INE)?(?:Rip)?\b", re.IGNORECASE),
+            re.compile(r"\bP(?:re)?DVD(?:Rip)?\b", re.IGNORECASE),
+            re.compile(r"\b(?:DVD?|BD|BR)?[ .-]*Scr(?:eener)?\b", re.IGNORECASE),
+            re.compile(r"\bVHS\b", re.IGNORECASE),
+            re.compile(r"\bHD[ .-]*TV(?:Rip)?\b", re.IGNORECASE),
+            re.compile(r"\bDVB[ .-]*(?:Rip)?\b", re.IGNORECASE),
+            re.compile(r"\bSAT[ .-]*Rips?\b", re.IGNORECASE),
+            re.compile(r"\bTVRips?\b", re.IGNORECASE),
+            re.compile(r"\bR5\b", re.IGNORECASE),
+            re.compile(r"\b(DivX|XviD)\b", re.IGNORECASE),
+        ]
+        # Return False if any pattern matches (indicating unwanted quality)
+        # Default to True if no pattern matches
+        return not any(pattern.search(string) for pattern in patterns)
 
-    def check_for_title_match(self, item, string, threshold=94) -> bool:
+    def check_for_title_match(self, item, string, threshold = 90) -> bool:
         """Check if the title matches PTN title using fuzzy matching."""
-        # TODO1: remove special chars from parsed_title and target_title. Could improve matching.
-        # TODO2: We should be checking aliases as well for titles. Anime only probably?
-        parsed_title = self.get_title(string)
-        if item.type == "movie":
-            target_title = item.title
-        elif item.type == "season":
+        if item.type == "season":
             target_title = item.parent.title
         elif item.type == "episode":
             target_title = item.parent.parent.title
         else:
-            return False
-        return fuzz.ratio(parsed_title.lower(), target_title.lower()) >= threshold
+            target_title = item.title
+        match_score = fuzz.ratio(string.lower(), target_title.lower())
+        return match_score >= threshold
+
+
+# def sort_streams(streams: dict, parser: Parser) -> dict:
+#     """Sorts streams based on user preferences."""
+#     def sorting_key(item):
+#         _, stream = item
+#         parsed_data = stream.get('parsed_data', {})
+
+#         points = 0
+#         if parser._is_dual_audio(parsed_data.get("string", "")):
+#             points += 5
+#         if parser._is_repack_or_proper(parsed_data):
+#             points += 3
+#         if parsed_data.get("is_4k", False) and (parser.settings.highest_quality or parser.settings.include_4k):
+#             points += 7
+#         if not parsed_data.get("is_unwanted", False):
+#             points -= 10  # Unwanted content should be pushed to the bottom
+#         return points
+#     sorted_streams = sorted(streams.items(), key=sorting_key, reverse=True)
+#     return dict(sorted_streams)
+
 
 parser = Parser()
\ No newline at end of file

diff --git a/backend/utils/request.py b/backend/utils/request.py
index 6e7535b6..9a020b4e 100644
--- a/backend/utils/request.py
+++ b/backend/utils/request.py
@@ -32,10 +32,10 @@ def __init__(self, response: requests.Response, response_type=SimpleNamespace):
 
     def handle_response(self, response: requests.Response):
        """Handle different types of responses"""
-        if not self.is_ok:
+        if not self.is_ok and self.status_code not in [429, 520]:
             logger.warning("Error: %s %s", response.status_code, response.content)
         if self.status_code not in [200, 201, 204]:
-            if self.status_code == 429:
+            if self.status_code in [429, 520]:
                 raise requests.exceptions.RequestException(response.content)
             return {}
         if len(response.content) > 0:
From d24dd0b2014d823ea8d47a04c287c831a92a6440 Mon Sep 17 00:00:00 2001
From: Spoked
Date: Mon, 22 Jan 2024 09:28:11 -0500
Subject: [PATCH 02/65] fix: correct limits for orionoid

---
 backend/program/scrapers/orionoid.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/backend/program/scrapers/orionoid.py b/backend/program/scrapers/orionoid.py
index a56aa408..caf0f078 100644
--- a/backend/program/scrapers/orionoid.py
+++ b/backend/program/scrapers/orionoid.py
@@ -28,11 +28,10 @@ def __init__(self, _):
             self.initialized = True
         else:
             return
-        self.max_calls = 50 if not self.is_premium else 999999
-        self.minute_limiter = RateLimiter(
-            max_calls=self.max_calls, period=86400, raise_on_limit=True
-        )
-        self.second_limiter = RateLimiter(max_calls=1, period=1)
+        self.max_calls = 50 if not self.is_premium else 60
+        self.period = 86400 if not self.is_premium else 60
+        self.minute_limiter = RateLimiter(max_calls=self.max_calls, period=self.period, raise_on_limit=True)
+        self.second_limiter = RateLimiter(max_calls=1, period=5)
        logger.info("Orionoid initialized!")

     def validate_settings(self) -> bool:
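The limiter pairing set up in PATCH 02 follows the pattern used across all the scrapers in this series: a coarse budget matching the account tier (50 calls per day for free Orionoid, 60 per minute for premium) plus a fine-grained spacing limiter, both used as context managers. A rough sketch of how the two nest during a scrape, using the RateLimiter and get helpers from this repo's utils.request as they appear in the diffs above (their internals are not shown here, so treat the shapes as assumed):

    # Assumed shapes, mirroring utils.request as used in the patches above.
    minute_limiter = RateLimiter(max_calls=50, period=86400, raise_on_limit=True)  # free tier: 50/day
    second_limiter = RateLimiter(max_calls=1, period=5)                            # spacing between calls

    with minute_limiter:          # raises RateLimitExceeded once the daily budget is spent
        with second_limiter:      # paces requests so at most one goes out per 5 seconds
            response = get(url, retry_if_failed=False, timeout=60)

The outer limiter protects the API quota, the inner one protects the remote host; run() catches RateLimitExceeded and calls limit_hit() to record the event.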
From 8ce2967b0134ba723bbf32ca4f87c379a30da8c6 Mon Sep 17 00:00:00 2001
From: Spoked
Date: Mon, 22 Jan 2024 10:14:18 -0500
Subject: [PATCH 03/65] fix: switch to comprehensions

---
 backend/program/scrapers/jackett.py   | 15 ++++++---------
 backend/program/scrapers/orionoid.py  | 22 ++++++++++++----------
 backend/program/scrapers/torrentio.py | 20 ++++++++++----------
 3 files changed, 28 insertions(+), 29 deletions(-)

diff --git a/backend/program/scrapers/jackett.py b/backend/program/scrapers/jackett.py
index f86ce2eb..1a938876 100644
--- a/backend/program/scrapers/jackett.py
+++ b/backend/program/scrapers/jackett.py
@@ -80,21 +80,18 @@ def api_scrape(self, item):
             url = (f"{self.settings.url}/api/v2.0/indexers/!status:failing,test:passed/results/torznab?apikey={self.api_key}{query}")
             with self.second_limiter:
                 response = get(url=url, retry_if_failed=False, timeout=60)
-
             if response.is_ok:
                 data = {}
-                parsed_data_list = []
-                for stream in response.data["rss"]["channel"].get("item", []):
-                    title = stream.get("title")
-                    parsed_data = parser.parse(item, title)
+                streams = response.data["rss"]["channel"].get("item", [])
+                parsed_data_list = [parser.parse(item, stream.get("title")) for stream in streams]
+                for stream, parsed_data in zip(streams, parsed_data_list):
                     if parsed_data.get("fetch", True) and parsed_data.get("title_match", False):
                         attr = stream.get("torznab:attr", [])
                         infohash_attr = next((a for a in attr if a.get("@name") == "infohash"), None)
                         if infohash_attr:
                             infohash = infohash_attr.get("@value")
-                            data[infohash] = {"name": title}
-                        parsed_data_list.append(parsed_data)
-                if len(data) > 0:
+                            data[infohash] = {"name": stream.get("title")}
+                if data:
                     item.parsed_data = parsed_data_list
                     return data
-        return {}
+        return {}

diff --git a/backend/program/scrapers/orionoid.py b/backend/program/scrapers/orionoid.py
index caf0f078..8db1613e 100644
--- a/backend/program/scrapers/orionoid.py
+++ b/backend/program/scrapers/orionoid.py
@@ -123,17 +123,19 @@ def api_scrape(self, item):
         with self.second_limiter:
             response = get(url, retry_if_failed=False, timeout=60)
-
-        parsed_data_list = []
         if response.is_ok and len(response.data.data.streams) > 0:
-            data = {}
-            for stream in response.data.data.streams:
-                title = stream.file.name
-                parsed_data = parser.parse(item, title)
-                if parsed_data["fetch"] and stream.file.hash:
-                    data[stream.file.hash] = {"name": title}
-                    parsed_data_list.append(parsed_data)
-            if len(data) > 0:
+            parsed_data_list = [
+                parser.parse(item, stream.file.name)
+                for stream in response.data.data.streams
+                if stream.file.hash
+            ]
+            data = {
+                stream.file.hash: {"name": stream.file.name}
+                for stream, parsed_data in zip(response.data.data.streams, parsed_data_list)
+                if parsed_data["fetch"]
+            }
+            if data:
                 item.parsed_data = parsed_data_list
                 return data
         return {}
\ No newline at end of file

diff --git a/backend/program/scrapers/torrentio.py b/backend/program/scrapers/torrentio.py
index b4614b2a..5a2267ed 100644
--- a/backend/program/scrapers/torrentio.py
+++ b/backend/program/scrapers/torrentio.py
@@ -79,17 +79,17 @@ def api_scrape(self, item):
                 url += identifier
             with self.second_limiter:
                 response = get(f"{url}.json", retry_if_failed=False, timeout=60)
-
-            parsed_data_list = []
             if response.is_ok and len(response.data.streams) > 0:
-                data = {}
-                for stream in response.data.streams:
-                    torrent = stream.title.split("\n👤")[0]
-                    parsed_data = parser.parse(item, torrent)
-                    if parsed_data.get("fetch", False):
-                        data[stream.infoHash] = {"name": torrent}
-                        parsed_data_list.append(parsed_data)
-                if len(data) > 0:
+                parsed_data_list = [
+                    parser.parse(item, stream.title.split("\n👤")[0])
+                    for stream in response.data.streams
+                ]
+                data = {
+                    stream.infoHash: {"name": stream.title.split("\n👤")[0]}
+                    for stream, parsed_data in zip(response.data.streams, parsed_data_list)
+                    if parsed_data.get("fetch", False)
+                }
+                if data:
                     item.parsed_data = parsed_data_list
                     return data
             return {}
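One thing to watch in the comprehension form introduced by PATCH 03: in the Orionoid version, parsed_data_list filters on stream.file.hash while the dict comprehension zips against the unfiltered stream list, so a single hash-less stream shifts every later pairing by one. A sketch that keeps the two sequences aligned by filtering once up front (a hypothetical rewrite using the same names as the diff, not part of the series):

    # Filter first, then parse, so streams and parsed_data_list stay index-aligned.
    streams = [s for s in response.data.data.streams if s.file.hash]
    parsed_data_list = [parser.parse(item, s.file.name) for s in streams]
    data = {
        s.file.hash: {"name": s.file.name}
        for s, parsed in zip(streams, parsed_data_list)
        if parsed["fetch"]
    }

The Jackett and Torrentio versions avoid the problem because they build parsed_data_list from the same unfiltered list they later zip against.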
From 1aa153a2db1917756de53f0ab0acd3606c4799a0 Mon Sep 17 00:00:00 2001
From: Spoked
Date: Mon, 22 Jan 2024 11:18:45 -0500
Subject: [PATCH 04/65] fix: disable plex logging for id mismatches

---
 backend/program/plex.py | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/backend/program/plex.py b/backend/program/plex.py
index aac5f72f..6b6a6545 100644
--- a/backend/program/plex.py
+++ b/backend/program/plex.py
@@ -221,16 +221,16 @@ def _map_item_from_data(item):
     # This is due to season 0 (specials) not having imdb ids.
     # Attempt to get the imdb id from the tvdb id if we don't have it.
     # Needs more testing..
-    if not imdb_id:
-        logger.debug("Unable to find imdb, trying tvdb for %s", title)
-        tvdb_id = next(
-            (guid.id.split("://")[-1] for guid in guids if "tvdb" in guid.id), None
-        )
-        if tvdb_id:
-            logger.debug("Unable to find imdb, but found tvdb: %s", tvdb_id)
-            imdb_id = get_imdbid_from_tvdb(tvdb_id)
-            if imdb_id:
-                logger.debug("Found imdb from tvdb: %s", imdb_id)
+    # if not imdb_id:
+    #     logger.debug("Unable to find imdb, trying tvdb for %s", title)
+    #     tvdb_id = next(
+    #         (guid.id.split("://")[-1] for guid in guids if "tvdb" in guid.id), None
+    #     )
+    #     if tvdb_id:
+    #         logger.debug("Unable to find imdb, but found tvdb: %s", tvdb_id)
+    #         imdb_id = get_imdbid_from_tvdb(tvdb_id)
+    #         if imdb_id:
+    #             logger.debug("Found imdb from tvdb: %s", imdb_id)
 
     media_item_data = {
         "title": title,

From b3be2d537512000a8eb720657bce1afa89a878e9 Mon Sep 17 00:00:00 2001
From: Spoked
Date: Wed, 24 Jan 2024 02:13:05 -0500
Subject: [PATCH 05/65] time for sleep. rework still wip.

---
 backend/program/plex.py               | 21 +++---
 backend/program/scrapers/jackett.py   | 10 +--
 backend/program/scrapers/orionoid.py  | 11 ++-
 backend/program/scrapers/torrentio.py | 31 +++++----
 backend/utils/parser.py               | 97 ++++++++++++++++-----------
 backend/utils/request.py              |  7 +-
 6 files changed, 101 insertions(+), 76 deletions(-)

diff --git a/backend/program/plex.py b/backend/program/plex.py
index 6b6a6545..7fde4155 100644
--- a/backend/program/plex.py
+++ b/backend/program/plex.py
@@ -220,17 +221,16 @@ def _map_item_from_data(item):
     # All movies have imdb, but not all shows do.
     # This is due to season 0 (specials) not having imdb ids.
     # Attempt to get the imdb id from the tvdb id if we don't have it.
-    # Needs more testing..
-    # if not imdb_id:
-    #     logger.debug("Unable to find imdb, trying tvdb for %s", title)
-    #     tvdb_id = next(
-    #         (guid.id.split("://")[-1] for guid in guids if "tvdb" in guid.id), None
-    #     )
-    #     if tvdb_id:
-    #         logger.debug("Unable to find imdb, but found tvdb: %s", tvdb_id)
-    #         imdb_id = get_imdbid_from_tvdb(tvdb_id)
-    #         if imdb_id:
-    #             logger.debug("Found imdb from tvdb: %s", imdb_id)
+    # Uses Trakt to get the imdb id from the tvdb id.
+    if not imdb_id:
+        tvdb_id = next(
+            (guid.id.split("://")[-1] for guid in guids if "tvdb" in guid.id), None
+        )
+        if tvdb_id:
+            imdb_id = get_imdbid_from_tvdb(tvdb_id)
+            if imdb_id:
+                logger.debug("%s was missing IMDb ID, found IMDb ID from TVdb ID: %s", title, imdb_id)
+    # If we still don't have an imdb id, we could check TMdb or use external services like cinemeta.
 
     media_item_data = {
         "title": title,

diff --git a/backend/program/scrapers/jackett.py b/backend/program/scrapers/jackett.py
index 1a938876..7fcb6d63 100644
--- a/backend/program/scrapers/jackett.py
+++ b/backend/program/scrapers/jackett.py
@@ -59,10 +59,10 @@ def run(self, item):
 
     def _scrape_item(self, item):
         """Scrape the given media item"""
-        data = self.api_scrape(item)
+        data, stream_count = self.api_scrape(item)
         if len(data) > 0:
             item.streams.update(data)
-            logger.debug("Found %s streams for %s", len(data), item.log_string)
+            logger.debug("Found %s streams out of %s for %s", len(data), stream_count, item.log_string)
         else:
             logger.debug("Could not find streams for %s", item.log_string)
 
@@ -92,6 +92,6 @@ def api_scrape(self, item):
                             infohash = infohash_attr.get("@value")
                             data[infohash] = {"name": stream.get("title")}
                 if data:
-                    item.parsed_data = parsed_data_list
-                    return data
-        return {}
+                    item.parsed_data.extend(parsed_data_list)
+                    return data, len(streams)
+        return {}, len(streams) or 0

diff --git a/backend/program/scrapers/orionoid.py b/backend/program/scrapers/orionoid.py
index 8db1613e..4b424bc4 100644
--- a/backend/program/scrapers/orionoid.py
+++ b/backend/program/scrapers/orionoid.py
@@ -75,10 +75,10 @@ def run(self, item):
         return
 
     def _scrape_item(self, item):
-        data = self.api_scrape(item)
+        data, stream_count = self.api_scrape(item)
         if len(data) > 0:
             item.streams.update(data)
-            logger.debug("Found %s streams for %s", len(data), item.log_string)
+            logger.debug("Found %s streams out of %s for %s", len(data), stream_count, item.log_string)
         else:
             logger.debug("Could not find streams for %s", item.log_string)
 
@@ -123,7 +123,6 @@ def api_scrape(self, item):
 
         with self.second_limiter:
             response = get(url, retry_if_failed=False, timeout=60)
-
         if response.is_ok and len(response.data.data.streams) > 0:
             parsed_data_list = [
                 parser.parse(item, stream.file.name)
                 for stream in response.data.data.streams
                 if stream.file.hash
             ]
             data = {
                 stream.file.hash: {"name": stream.file.name}
                 for stream, parsed_data in zip(response.data.data.streams, parsed_data_list)
                 if parsed_data["fetch"]
             }
             if data:
-                item.parsed_data = parsed_data_list
-                return data
-        return {}
+                item.parsed_data.extend(parsed_data_list)
+                return data, len(response.data.data.streams)
+        return {}, len(response.data.data.streams) or 0
\ No newline at end of file

diff --git a/backend/program/scrapers/torrentio.py b/backend/program/scrapers/torrentio.py
index 5a2267ed..62231e68 100644
--- a/backend/program/scrapers/torrentio.py
+++ b/backend/program/scrapers/torrentio.py
@@ -1,7 +1,7 @@
 """ Torrentio scraper module """
 from typing import Optional
 from pydantic import BaseModel
-from requests import HTTPError, ReadTimeout
+from requests import ConnectTimeout, HTTPError, ReadTimeout
 from requests.exceptions import RequestException
 from utils.logger import logger
 from utils.request import RateLimitExceeded, get, RateLimiter
@@ -39,21 +39,26 @@ def run(self, item) -> None:
         and update the object with scraped streams"""
         try:
             self._scrape_item(item)
-        except RequestException:
-            self.minute_limiter.limit_hit()
-            return
         except RateLimitExceeded:
             self.minute_limiter.limit_hit()
             logger.debug("Torrentio rate limit hit for item: %s", item.log_string)
             return
+        except ConnectTimeout:
+            self.minute_limiter.limit_hit()
+            logger.debug("Torrentio connection timeout for item: %s", item.log_string)
+            return
+        except RequestException as e:
+            self.minute_limiter.limit_hit()
+            logger.debug("Torrentio request status %s exception: %s", e.response.status_code, e.response.reason)
+            return
 
     def _scrape_item(self, item):
-        data = self.api_scrape(item)  # Unpack the tuple to get data and stream_count
+        data, stream_count = self.api_scrape(item)  # Unpack the tuple to get data and stream_count
         if len(data) > 0:
             item.streams.update(data)
-            logger.debug("Found %s streams for %s", len(data), item.log_string)
+            logger.debug("Found %s streams out of %s for %s", len(data), stream_count, item.log_string)
         else:
-            logger.debug("Could not find streams for %s", item.log_string)
+            logger.debug("Could not find streams for %s out of %s", item.log_string, stream_count)
 
     def api_scrape(self, item):
         """Wrapper for torrentio scrape method"""
@@ -81,15 +86,15 @@ def api_scrape(self, item):
                 response = get(f"{url}.json", retry_if_failed=False, timeout=60)
             if response.is_ok and len(response.data.streams) > 0:
                 parsed_data_list = [
-                    parser.parse(item, stream.title.split("\n👤")[0])
-                    for stream in response.data.streams
+                    parser.parse(item, stream.title.split("\n👤")[0].split("\n")[0]) for stream in response.data.streams
                 ]
                 data = {
-                    stream.infoHash: {"name": stream.title.split("\n👤")[0]}
+                    stream.infoHash: {"name": stream.title.split("\n👤")[0].split("\n")[0]}
                     for stream, parsed_data in zip(response.data.streams, parsed_data_list)
                     if parsed_data.get("fetch", False)
                 }
                 if data:
-                    item.parsed_data = parsed_data_list
-                    return data
-            return {}
+                    item.parsed_data.extend(parsed_data_list)
+                    item.parsed = True
+                    return data, len(response.data.streams)
+            return {}, len(response.data.streams) or 0

diff --git a/backend/utils/parser.py b/backend/utils/parser.py
index 0bbae0e1..81538631 100644
--- a/backend/utils/parser.py
+++ b/backend/utils/parser.py
@@ -34,6 +34,9 @@ def parse(self, item, string) -> dict:
 
     def _parse(self, item, string) -> dict:
         """Parse the given string and return the parsed data."""
+        if len(item.parsed_data) != 0:
+            return item.parsed_data
+
         parse = PTN.parse(string)
         parsed_title = parse.get("title", "")
 
@@ -50,10 +53,11 @@ def _parse(self, item, string) -> dict:
         else:
             episodes.append(int(episode))
 
-        title_match = self.check_for_title_match(item, string)
+        title_match = self.check_for_title_match(item, parsed_title)
         is_4k = parse.get("resolution", False) in ["2160p", "4K", "UHD"]
         is_complete = self._is_complete_series(string)
-        is_dual_audio = self._is_dual_audio(item, string)
+        is_dual_audio = self._is_dual_audio(string)
+        _is_unwanted_quality = self._is_unwanted_quality(string)
 
         parsed_data = {
             "string": string,
             "parsed_title": parsed_title,
             "title_match": title_match,
             "is_4k": is_4k,
             "is_dual_audio": is_dual_audio,
             "is_complete": is_complete,
-            "_is_unwanted_quality": self._is_unwanted_quality(string),
+            "_is_unwanted_quality": _is_unwanted_quality,
             "year": parse.get("year", False),
             "resolution": parse.get("resolution", []),
             "quality": parse.get("quality", []),
@@ -88,19 +92,19 @@ def _parse(self, item, string) -> dict:
 
     def episodes_in_season(self, season, string) -> List[int]:
         """Return a list of episodes in the given season."""
-        parse = self._parse(string)
+        parse = self._parse(string=string)
         if parse["season"] == season:
             return parse["episodes"]
         return []
 
-    def _should_fetch(self, parsed_data) -> bool:
+    def _should_fetch(self, item, parsed_data: list) -> bool:
         """Determine if the parsed content should be fetched."""
         # This is where we determine if the item should be fetched
         # based on the user settings and predefined rules.
         # Edit with caution. All have to match for the item to be fetched.
+        item_language = [self._get_item_language(item)]
         return (
             parsed_data["resolution"] in self.resolution and
-            any(lang in parsed_data.get("language", []) for lang in self.language) and
+            any(lang in parsed_data.get("language", item_language) for lang in self.language) and
             not parsed_data["is_unwanted"]
         )
 
-    def _is_highest_quality(self, item) -> bool:
+    def _is_highest_quality(self, parsed_data: list) -> bool:
         """Check if content is `highest quality`."""
-        return any([
-            item.parsed_data.get("hdr", False),
-            item.parsed_data.get("remux", False),
-            item.parsed_data.get("resolution", False) in ["UHD", "2160p", "4K"],
-            item.parsed_data.get("upscaled", False)
-        ])
-
-    def _is_repack_or_proper(self, item) -> bool:
+        return any(
+            parsed.get("resolution") in ["UHD", "2160p", "4K"] or
+            parsed.get("hdr", False) or
+            parsed.get("remux", False) or
+            parsed.get("upscaled", False)
+            for parsed in parsed_data
+        )
+
+    def _is_repack_or_proper(self, parsed_data: list) -> bool:
         """Check if content is `repack` or `proper`."""
-        return any([
-            item.parsed_data.get("proper", False),
-            item.parsed_data.get("repack", False)
-        ])
-
-    def _is_dual_audio(self, item, string) -> bool:
-        """Check if content is `dual audio`."""
-        if item.parsed_data.get("audio", False) == "Dual":
-            return True
-        patterns = [
+        return any(
+            parsed.get("proper", False) or parsed.get("repack", False)
+            for parsed in parsed_data
+        )
+
+    def _is_dual_audio(self, string) -> bool:
+        """Check if any content in parsed_data has dual audio."""
+        dual_audio_patterns = [
             re.compile(r"\bmulti(?:ple)?[ .-]*(?:lang(?:uages?)?|audio|VF2)?\b", re.IGNORECASE),
             re.compile(r"\btri(?:ple)?[ .-]*(?:audio|dub\w*)\b", re.IGNORECASE),
             re.compile(r"\bdual[ .-]*(?:au?$|[aá]udio|line)\b", re.IGNORECASE),
+            re.compile(r"\bdual\b(?![ .-]*sub)", re.IGNORECASE),
             re.compile(r"\b(?:audio|dub(?:bed)?)[ .-]*dual\b", re.IGNORECASE),
+            re.compile(r"\bengl?(?:sub[A-Z]*)?\b", re.IGNORECASE),
+            re.compile(r"\beng?sub[A-Z]*\b", re.IGNORECASE),
             re.compile(r"\b(?:DUBBED|dublado|dubbing|DUBS?)\b", re.IGNORECASE),
         ]
-        return any(pattern.search(string) for pattern in patterns)
+        return any(pattern.search(string) for pattern in dual_audio_patterns)
 
     @staticmethod
     def _is_complete_series(string) -> bool:
         """Check if string is a `complete series`."""
-        # Can be used on either movie or show item type
-        patterns = [
+        # Can be used on either movie or show item types
+        series_patterns = [
             re.compile(r"(?:\bthe\W)?(?:\bcomplete|collection|dvd)?\b[ .]?\bbox[ .-]?set\b", re.IGNORECASE),
             re.compile(r"(?:\bthe\W)?(?:\bcomplete|collection|dvd)?\b[ .]?\bmini[ .-]?series\b", re.IGNORECASE),
             re.compile(r"(?:\bthe\W)?(?:\bcomplete|full|all)\b.*\b(?:series|seasons|collection|episodes|set|pack|movies)\b", re.IGNORECASE),
             re.compile(r"\b(?:series|seasons|movies?)\b.*\b(?:complete|collection)\b", re.IGNORECASE),
             re.compile(r"(?:\bthe\W)?\bultimate\b[ .]\bcollection\b", re.IGNORECASE),
             re.compile(r"\bcollection\b.*\b(?:set|pack|movies)\b", re.IGNORECASE),
             re.compile(r"\bcollection\b", re.IGNORECASE),
             re.compile(r"duology|trilogy|quadr[oi]logy|tetralogy|pentalogy|hexalogy|heptalogy|anthology|saga", re.IGNORECASE)
         ]
-        return any(pattern.search(string) for pattern in patterns)
+        return any(pattern.search(string) for pattern in series_patterns)
 
     @staticmethod
     def _is_unwanted_quality(string) -> bool:
-        """Check if string has an `unwanted` quality."""
-        patterns = [
+        """Check if string has an 'unwanted' quality. Default to False."""
+        unwanted_patterns = [
             re.compile(r"\b(?:H[DQ][ .-]*)?CAM(?:H[DQ])?(?:[ .-]*Rip)?\b", re.IGNORECASE),
             re.compile(r"\b(?:H[DQ][ .-]*)?S[ .-]*print\b", re.IGNORECASE),
             re.compile(r"\b(?:HD[ .-]*)?T(?:ELE)?S(?:YNC)?(?:Rip)?\b", re.IGNORECASE),
             re.compile(r"\b(?:HD[ .-]*)?T(?:ELE)?C(?:INE)?(?:Rip)?\b", re.IGNORECASE),
             re.compile(r"\bP(?:re)?DVD(?:Rip)?\b", re.IGNORECASE),
             re.compile(r"\b(?:DVD?|BD|BR)?[ .-]*Scr(?:eener)?\b", re.IGNORECASE),
             re.compile(r"\bVHS\b", re.IGNORECASE),
             re.compile(r"\bHD[ .-]*TV(?:Rip)?\b", re.IGNORECASE),
             re.compile(r"\bDVB[ .-]*(?:Rip)?\b", re.IGNORECASE),
             re.compile(r"\bSAT[ .-]*Rips?\b", re.IGNORECASE),
             re.compile(r"\bTVRips?\b", re.IGNORECASE),
             re.compile(r"\bR5\b", re.IGNORECASE),
             re.compile(r"\b(DivX|XviD)\b", re.IGNORECASE),
         ]
-        # Return False if any pattern matches (indicating unwanted quality)
-        # Default to True if no pattern matches
-        return not any(pattern.search(string) for pattern in patterns)
+        return not any(pattern.search(string) for pattern in unwanted_patterns)
 
-    def check_for_title_match(self, item, string, threshold = 90) -> bool:
+    def check_for_title_match(self, item, parsed_title, threshold=90) -> bool:
         """Check if the title matches PTN title using fuzzy matching."""
+        target_title = item.title
         if item.type == "season":
             target_title = item.parent.title
         elif item.type == "episode":
             target_title = item.parent.parent.title
-        else:
-            target_title = item.title
-        match_score = fuzz.ratio(string.lower(), target_title.lower())
-        return match_score >= threshold
+        match_score = fuzz.ratio(parsed_title.lower(), target_title.lower())
+        if match_score >= threshold:
+            return True
+        return False
+
+    def _get_item_language(self, item) -> str:
+        """Get the language of the item."""
+        if item.type == "season":
+            if item.parent.language == "en":
+                return "English"
+        elif item.type == "episode":
+            if item.parent.parent.language == "en":
+                return "English"
+        if item.language == "en":
+            return "English"
+        # This is crap. Need to switch to using a dict instead.
+        return "English"
 
 
 # def sort_streams(streams: dict, parser: Parser) -> dict:

diff --git a/backend/utils/request.py b/backend/utils/request.py
index 9a020b4e..1fe29e49 100644
--- a/backend/utils/request.py
+++ b/backend/utils/request.py
@@ -32,10 +32,13 @@ def __init__(self, response: requests.Response, response_type=SimpleNamespace):
 
     def handle_response(self, response: requests.Response):
         """Handle different types of responses"""
-        if not self.is_ok and self.status_code not in [429, 520]:
+        if not self.is_ok and self.status_code not in [429, 520, 522]:
             logger.warning("Error: %s %s", response.status_code, response.content)
+        if self.status_code in [520, 522]:
+            # Cloudflare error from Torrentio
+            raise requests.exceptions.ConnectTimeout(response.content)
         if self.status_code not in [200, 201, 204]:
-            if self.status_code in [429, 520]:
+            if self.status_code in [429]:
                 raise requests.exceptions.RequestException(response.content)
             return {}
         if len(response.content) > 0:
From 0b8147d521e98519a8bc3cb6d569e5a3a2cad8d7 Mon Sep 17 00:00:00 2001
From: Spoked
Date: Wed, 24 Jan 2024 21:25:46 -0500
Subject: [PATCH 06/65] feat: parser works. needs more work. language needs a
 rewrite. disabled for now.

---
 backend/program/media/item.py         |  1 +
 backend/program/plex.py               |  5 ++-
 backend/program/scrapers/jackett.py   |  3 ++
 backend/program/scrapers/orionoid.py  |  3 ++
 backend/program/scrapers/torrentio.py | 11 ++++--
 backend/program/updaters/trakt.py     | 11 +++---
 backend/utils/parser.py               | 48 ++++++++++++++------------
 7 files changed, 51 insertions(+), 31 deletions(-)

diff --git a/backend/program/media/item.py b/backend/program/media/item.py
index a715fa49..735606f2 100644
--- a/backend/program/media/item.py
+++ b/backend/program/media/item.py
@@ -27,6 +27,7 @@ def __init__(self, item):
         self.requested_by = item.get("requested_by", None)
         self.file = None
         self.folder = None
+        self.is_anime = False
         self.parsed = False
         self.parsed_data = item.get("parsed_data", [])
 
diff --git a/backend/program/plex.py b/backend/program/plex.py
index 7fde4155..bb195130 100644
--- a/backend/program/plex.py
+++ b/backend/program/plex.py
@@ -49,8 +49,9 @@ def __init__(self, media_items: MediaItemContainer):
             self.log_worker_count = False
             self.media_items = media_items
             self._update_items(init=True)
-        except Exception:
+        except Exception as e:
             logger.error("Plex is not configured!")
+            logger.error("Error: %s", e)
             return
         logger.info("Plex initialized!")
         self.initialized = True
@@ -201,6 +202,7 @@ def _map_item_from_data(item):
     if item.type in ["movie", "episode"]:
         file = getattr(item, "locations", [None])[0].split("/")[-1]
     genres = [genre.tag for genre in getattr(item, "genres", [])]
+    is_anime = "anime" in genres
     title = getattr(item, "title", None)
     key = getattr(item, "key", None)
     season_number = getattr(item, "seasonNumber", None)
@@ -241,6 +243,7 @@ def _map_item_from_data(item):
         "guid": guid,
         "art_url": art_url,
         "file": file,
+        "is_anime": is_anime,
     }
 
     # Instantiate the appropriate subclass based on 'item_type'

diff --git a/backend/program/scrapers/jackett.py b/backend/program/scrapers/jackett.py
index 7fcb6d63..b636cc16 100644
--- a/backend/program/scrapers/jackett.py
+++ b/backend/program/scrapers/jackett.py
@@ -91,7 +91,10 @@ def api_scrape(self, item):
                             infohash = infohash_attr.get("@value")
                             data[infohash] = {"name": stream.get("title")}
+                for parsed_data in parsed_data_list:
+                    logger.debug("Jackett Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"])
                 if data:
                     item.parsed_data.extend(parsed_data_list)
+                    item.parsed_data.append({self.key: True})
                     return data, len(streams)
         return {}, len(streams) or 0

diff --git a/backend/program/scrapers/orionoid.py b/backend/program/scrapers/orionoid.py
index 4b424bc4..0fd7fe63 100644
--- a/backend/program/scrapers/orionoid.py
+++ b/backend/program/scrapers/orionoid.py
@@ -134,7 +134,10 @@ def api_scrape(self, item):
                 if parsed_data["fetch"]
             }
+            for parsed_data in parsed_data_list:
+                logger.debug("Orionoid Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"])
             if data:
                 item.parsed_data.extend(parsed_data_list)
+                item.parsed_data.append({self.key: True})
                 return data, len(response.data.data.streams)
         return {}, len(response.data.data.streams) or 0
\ No newline at end of file

diff --git a/backend/program/scrapers/torrentio.py b/backend/program/scrapers/torrentio.py
index 62231e68..9b8e3b2e 100644
--- a/backend/program/scrapers/torrentio.py
+++ b/backend/program/scrapers/torrentio.py
@@ -1,7 +1,7 @@
 """ Torrentio scraper module """
 from typing import Optional
 from pydantic import BaseModel
-from requests import ConnectTimeout, HTTPError, ReadTimeout
+from requests import ConnectTimeout, ReadTimeout
 from requests.exceptions import RequestException
 from utils.logger import logger
 from utils.request import RateLimitExceeded, get, RateLimiter
@@ -47,6 +47,10 @@ def run(self, item) -> None:
             self.minute_limiter.limit_hit()
             logger.debug("Torrentio connection timeout for item: %s", item.log_string)
             return
+        except ReadTimeout:
+            self.minute_limiter.limit_hit()
+            logger.debug("Torrentio read timeout for item: %s", item.log_string)
+            return
         except RequestException as e:
             self.minute_limiter.limit_hit()
             logger.debug("Torrentio request status %s exception: %s", e.response.status_code, e.response.reason)
@@ -91,10 +95,13 @@ def api_scrape(self, item):
                 data = {
                     stream.infoHash: {"name": stream.title.split("\n👤")[0].split("\n")[0]}
                     for stream, parsed_data in zip(response.data.streams, parsed_data_list)
-                    if parsed_data.get("fetch", False)
+                    if parsed_data.get("fetch", False) and parsed_data.get("string", False)
                 }
+                for parsed_data in parsed_data_list:
+                    logger.debug("Torrentio Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"])
                 if data:
                     item.parsed_data.extend(parsed_data_list)
+                    item.parsed_data.append({self.key: True})
                     item.parsed = True
                     return data, len(response.data.streams)
             return {}, len(response.data.streams) or 0

diff --git a/backend/program/updaters/trakt.py b/backend/program/updaters/trakt.py
index 720b2538..89a1a61d 100644
--- a/backend/program/updaters/trakt.py
+++ b/backend/program/updaters/trakt.py
@@ -89,8 +89,7 @@ def _map_item_from_data(data, item_type):
         "title": getattr(data, "title", None),  # 'Game of Thrones'
         "year": getattr(data, "year", None),  # 2011
         "status": getattr(data, "status", None),  # 'ended', 'released', 'returning series'
-        "aired_at": formatted_aired_at,  # datetime.datetime(2011, 4, 17, 0, 0)
-        "is_anime": is_anime,  # True"
+        "aired_at": formatted_aired_at,  # datetime.datetime(2011, 4, 17, 0, 0) # True"
         "imdb_id": getattr(data.ids, "imdb", None),  # 'tt0496424'
         "tvdb_id": getattr(data.ids, "tvdb", None),  # 79488
         "tmdb_id": getattr(data.ids, "tmdb", None),  # 1399
@@ -100,10 +99,13 @@ def _map_item_from_data(data, item_type):
         "language": getattr(data, "language", None),  # 'en'
         "requested_at": datetime.now(),  # datetime.datetime(2021, 4, 17, 0, 0)
     }
+
     match item_type:
         case "movie":
+            item["is_anime"] = is_anime
             return_item = Movie(item)
         case "show":
+            item["is_anime"] = is_anime
             return_item = Show(item)
         case "season":
             item["number"] = getattr(data, "number")
@@ -112,6 +114,7 @@ def _map_item_from_data(data, item_type):
             item["number"] = getattr(data, "number")
             return_item = Episode(item)
         case _:
+            logger.debug("Unknown item type %s for %s", item_type, data.title)
             return_item = None
     return return_item
 
@@ -148,11 +151,9 @@ def create_item_from_imdb_id(imdb_id: str):
             data = response.data[0].season
         elif media_type == "episode":
             data = response.data[0].episode
-        else:
-            logger.debug("Unknown item %s with type %s", imdb_id, media_type)
-            return None
     if data:
         return _map_item_from_data(data, media_type)
+    logger.debug("Unknown item %s with type %s", imdb_id, media_type)
     return None

diff --git a/backend/utils/parser.py b/backend/utils/parser.py
index 81538631..e87ee08b 100644
--- a/backend/utils/parser.py
+++ b/backend/utils/parser.py
@@ -2,6 +2,7 @@
 import PTN
 from typing import List
 from pydantic import BaseModel
+from utils.logger import logger
 from utils.settings import settings_manager
 from thefuzz import fuzz
 
@@ -30,13 +31,12 @@ def determine_resolution(self):
 
     def parse(self, item, string) -> dict:
         """Parse the given string and return True if it matches the user settings."""
+        if len(item.parsed_data) != 0 or item.parsed:
+            return item.parsed_data
         return self._parse(item, string)
 
     def _parse(self, item, string) -> dict:
         """Parse the given string and return the parsed data."""
-        if len(item.parsed_data) != 0:
-            return item.parsed_data
-
         parse = PTN.parse(string)
         parsed_title = parse.get("title", "")
 
@@ -60,10 +60,11 @@ def _parse(self, item, string) -> dict:
             "string": string,
             "parsed_title": parsed_title,
             "title_match": title_match,
+            "fetch": False,
             "is_4k": is_4k,
             "is_dual_audio": is_dual_audio,
             "is_complete": is_complete,
-            "_is_unwanted_quality": _is_unwanted_quality,
+            "is_unwanted_quality": _is_unwanted_quality,
             "year": parse.get("year", False),
             "resolution": parse.get("resolution", []),
             "quality": parse.get("quality", []),
@@ -83,7 +84,7 @@ def _parse(self, item, string) -> dict:
             "extended": parse.get("extended", [])
         }
 
-        parsed_data["fetch"] = self._should_fetch(parsed_data)
+        parsed_data["fetch"] = self._should_fetch(item, parsed_data=parsed_data)
         return parsed_data
 
     def episodes(self, string) -> List[int]:
@@ -97,19 +98,19 @@ def episodes_in_season(self, season, string) -> List[int]:
             return parse["episodes"]
         return []
 
-    def _should_fetch(self, item, parsed_data: list) -> bool:
+    def _should_fetch(self, item, parsed_data: dict) -> bool:
         """Determine if the parsed content should be fetched."""
         # This is where we determine if the item should be fetched
         # based on the user settings and predefined rules.
         # Edit with caution. All have to match for the item to be fetched.
-        item_language = [self._get_item_language(item)]
+        # item_language = self._get_item_language(item)
         return (
             parsed_data["resolution"] in self.resolution and
-            any(lang in parsed_data.get("language", item_language) for lang in self.language) and
-            not parsed_data["is_unwanted"]
+            # any(lang in parsed_data.get("language", item_language) for lang in self.language) and
+            not parsed_data["is_unwanted_quality"]
         )
 
-    def _is_highest_quality(self, parsed_data: list) -> bool:
+    def _is_highest_quality(self, parsed_data: dict) -> bool:
         """Check if content is `highest quality`."""
         return any(
             parsed.get("resolution") in ["UHD", "2160p", "4K"] or
             parsed.get("hdr", False) or
             parsed.get("remux", False) or
             parsed.get("upscaled", False)
             for parsed in parsed_data
         )
 
-    def _is_repack_or_proper(self, parsed_data: list) -> bool:
-        """Check if content is `repack` or `proper`."""
-        return any(
-            parsed.get("proper", False) or parsed.get("repack", False)
-            for parsed in parsed_data
-        )
-
     def _is_dual_audio(self, string) -> bool:
         """Check if any content in parsed_data has dual audio."""
         dual_audio_patterns = [
@@ -167,7 +161,7 @@ def _is_unwanted_quality(string) -> bool:
             re.compile(r"\bR5\b", re.IGNORECASE),
             re.compile(r"\b(DivX|XviD)\b", re.IGNORECASE),
         ]
-        return not any(pattern.search(string) for pattern in unwanted_patterns)
+        return any(pattern.search(string) for pattern in unwanted_patterns)
 
     def check_for_title_match(self, item, parsed_title, threshold=90) -> bool:
         """Check if the title matches PTN title using fuzzy matching."""
@@ -184,16 +178,24 @@ def check_for_title_match(self, item, parsed_title, threshold=90) -> bool:
 
     def _get_item_language(self, item) -> str:
         """Get the language of the item."""
+        # This is crap. Need to switch to using a dict instead.
         if item.type == "season":
             if item.parent.language == "en":
-                return "English"
+                if item.parent.is_anime:
+                    return ["English", "Japanese"]
+                return ["English"]
         elif item.type == "episode":
             if item.parent.parent.language == "en":
-                return "English"
+                if item.parent.parent.is_anime:
+                    return ["English", "Japanese"]
+                return ["English"]
         if item.language == "en":
-            return "English"
+            if item.is_anime:
+                return ["English", "Japanese"]
+            return ["English"]
-        # This is crap. Need to switch to using a dict instead.
-        return "English"
+        if item.is_anime:
+            return ["English", "Japanese"]
+        return ["English"]
b/backend/program/scrapers/jackett.py index b636cc16..7ce92113 100644 --- a/backend/program/scrapers/jackett.py +++ b/backend/program/scrapers/jackett.py @@ -62,7 +62,7 @@ def _scrape_item(self, item): data, stream_count = self.api_scrape(item) if len(data) > 0: item.streams.update(data) - logger.debug("Found %s streams out of %s for %s", len(data), stream_count, item.log_string) + logger.info("Found %s streams out of %s for %s", len(data), stream_count, item.log_string) else: logger.debug("Could not find streams for %s", item.log_string) diff --git a/backend/program/scrapers/orionoid.py b/backend/program/scrapers/orionoid.py index 0fd7fe63..b815c910 100644 --- a/backend/program/scrapers/orionoid.py +++ b/backend/program/scrapers/orionoid.py @@ -78,7 +78,7 @@ def _scrape_item(self, item): data, stream_count = self.api_scrape(item) if len(data) > 0: item.streams.update(data) - logger.debug("Found %s streams out of %s for %s", len(data), stream_count, item.log_string) + logger.info("Found %s streams out of %s for %s", len(data), stream_count, item.log_string) else: logger.debug("Could not find streams for %s", item.log_string) diff --git a/backend/program/scrapers/torrentio.py b/backend/program/scrapers/torrentio.py index 9b8e3b2e..f0414195 100644 --- a/backend/program/scrapers/torrentio.py +++ b/backend/program/scrapers/torrentio.py @@ -60,7 +60,7 @@ def _scrape_item(self, item): data, stream_count = self.api_scrape(item) # Unpack the tuple to get data and stream_count if len(data) > 0: item.streams.update(data) - logger.debug("Found %s streams out of %s for %s", len(data), stream_count, item.log_string) + logger.info("Found %s streams out of %s for %s", len(data), stream_count, item.log_string) else: logger.debug("Could not find streams for %s out of %s", item.log_string, stream_count) From 55b10d2581b67cd9393aa71399edd8b738bcf7f9 Mon Sep 17 00:00:00 2001 From: Spoked Date: Wed, 24 Jan 2024 22:48:09 -0500 Subject: [PATCH 08/65] fix: remove plex debug line for users --- backend/program/plex.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/backend/program/plex.py b/backend/program/plex.py index bb195130..ea544d7e 100644 --- a/backend/program/plex.py +++ b/backend/program/plex.py @@ -49,9 +49,8 @@ def __init__(self, media_items: MediaItemContainer): self.log_worker_count = False self.media_items = media_items self._update_items(init=True) - except Exception as e: + except Exception: logger.error("Plex is not configured!") - logger.error("Error: %s", e) return logger.info("Plex initialized!") self.initialized = True From 570c44550b4ebc4445ea843bdcbcfc079a2dc239 Mon Sep 17 00:00:00 2001 From: Spoked Date: Wed, 24 Jan 2024 23:04:17 -0500 Subject: [PATCH 09/65] disable tvdb checks from listrr, overseerr, plex. needs reworked. 
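With TVDB disabled, the IMDb resolution that remains is effectively a two-step
chain: take the IMDb id if the provider already has one, otherwise convert the
TMDB id. A minimal sketch of that chain (`resolve_imdb_id` is a hypothetical
wrapper written for illustration; `get_imdbid_from_tmdb` is the existing helper
in backend/program/updaters/trakt.py, and the import path is assumed):

    from program.updaters.trakt import get_imdbid_from_tmdb  # assumed path

    def resolve_imdb_id(external_ids) -> str | None:
        """Return the IMDb id directly if present, else convert from TMDB."""
        imdb_id = getattr(external_ids, "imdbId", None)
        if imdb_id:
            return imdb_id
        tmdb_id = getattr(external_ids, "tmdbId", None)
        if tmdb_id:
            return get_imdbid_from_tmdb(tmdb_id)
        return None  # TVDB fallback stays off until the rework lands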
--- backend/program/content/listrr.py | 10 +++++----- backend/program/content/overseerr.py | 3 ++- backend/program/plex.py | 16 ++++++++-------- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/backend/program/content/listrr.py b/backend/program/content/listrr.py index fba7f8b6..eed823bc 100644 --- a/backend/program/content/listrr.py +++ b/backend/program/content/listrr.py @@ -96,11 +96,11 @@ def _get_items_from_Listrr(self, content_type, content_lists): imdb_id = item.imDbId if imdb_id: unique_ids.add(imdb_id) - elif content_type == "Shows" and item.tvDbId: - imdb_id = get_imdbid_from_tvdb(item.tvDbId) - if imdb_id: - unique_ids.add(imdb_id) - elif content_type == "Movies" and item.tmDbId: + # elif content_type == "Shows" and item.tvDbId: + # imdb_id = get_imdbid_from_tvdb(item.tvDbId) + # if imdb_id: + # unique_ids.add(imdb_id) + if not imdb_id and content_type == "Movies" and item.tmDbId: imdb_id = get_imdbid_from_tmdb(item.tmDbId) if imdb_id: unique_ids.add(imdb_id) diff --git a/backend/program/content/overseerr.py b/backend/program/content/overseerr.py index 15bf5760..e13b2348 100644 --- a/backend/program/content/overseerr.py +++ b/backend/program/content/overseerr.py @@ -108,7 +108,8 @@ def get_imdb_id(self, overseerr_item): return imdb_id # Try alternate IDs if IMDb ID is not available - alternate_ids = [('tvdbId', get_imdbid_from_tvdb), ('tmdbId', get_imdbid_from_tmdb)] + # alternate_ids = [('tvdbId', get_imdbid_from_tvdb), ('tmdbId', get_imdbid_from_tmdb)] + alternate_ids = [('tmdbId', get_imdbid_from_tmdb)] for id_attr, fetcher in alternate_ids: external_id_value = getattr(response.data.externalIds, id_attr, None) if external_id_value: diff --git a/backend/program/plex.py b/backend/program/plex.py index ea544d7e..36c9848a 100644 --- a/backend/program/plex.py +++ b/backend/program/plex.py @@ -222,14 +222,14 @@ def _map_item_from_data(item): # This is due to season 0 (specials) not having imdb ids. # Attempt to get the imdb id from the tvdb id if we don't have it. # Uses Trakt to get the imdb id from the tvdb id. - if not imdb_id: - tvdb_id = next( - (guid.id.split("://")[-1] for guid in guids if "tvdb" in guid.id), None - ) - if tvdb_id: - imdb_id = get_imdbid_from_tvdb(tvdb_id) - if imdb_id: - logger.debug("%s was missing IMDb ID, found IMDb ID from TVdb ID: %s", title, imdb_id) + # if not imdb_id: + # tvdb_id = next( + # (guid.id.split("://")[-1] for guid in guids if "tvdb" in guid.id), None + # ) + # if tvdb_id: + # imdb_id = get_imdbid_from_tvdb(tvdb_id) + # if imdb_id: + # logger.debug("%s was missing IMDb ID, found IMDb ID from TVdb ID: %s", title, imdb_id) # If we still don't have an imdb id, we could check TMdb or use external services like cinemeta. media_item_data = { From 700c10052adfc4a5113aef114c549ac1fb654167 Mon Sep 17 00:00:00 2001 From: Spoked Date: Thu, 25 Jan 2024 00:05:57 -0500 Subject: [PATCH 10/65] set torrentio to disabled by default. removed parse logs. raised torrentio limit slightly. 
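Raising `RateLimiter(max_calls=1, period=5)` to `period=7` simply widens the
minimum gap between consecutive Torrentio requests. As a rough illustration of
the semantics that limiter enforces (a sketch only, not the project's actual
`utils.request.RateLimiter`):

    import time

    class MinIntervalLimiter:
        """Allow at most one call per `period` seconds (the max_calls=1 case)."""

        def __init__(self, period: float):
            self.period = period
            self._last = 0.0

        def __enter__(self):
            wait = self.period - (time.monotonic() - self._last)
            if wait > 0:
                time.sleep(wait)  # block until the window has elapsed
            self._last = time.monotonic()
            return self

        def __exit__(self, *exc):
            return False

    limiter = MinIntervalLimiter(period=7)  # one Torrentio request every 7s
    with limiter:
        ...  # the HTTP call would go here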
--- backend/program/scrapers/jackett.py | 4 ++-- backend/program/scrapers/orionoid.py | 4 ++-- backend/program/scrapers/torrentio.py | 6 +++--- backend/utils/default_settings.json | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/backend/program/scrapers/jackett.py b/backend/program/scrapers/jackett.py index 7ce92113..dca912bd 100644 --- a/backend/program/scrapers/jackett.py +++ b/backend/program/scrapers/jackett.py @@ -91,8 +91,8 @@ def api_scrape(self, item): if infohash_attr: infohash = infohash_attr.get("@value") data[infohash] = {"name": stream.get("title")} - for parsed_data in parsed_data_list: - logger.debug("Jackett Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) + # for parsed_data in parsed_data_list: + # logger.debug("Jackett Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) if data: item.parsed_data.extend(parsed_data_list) item.parsed_data.append({self.key: True}) diff --git a/backend/program/scrapers/orionoid.py b/backend/program/scrapers/orionoid.py index b815c910..c89a21c2 100644 --- a/backend/program/scrapers/orionoid.py +++ b/backend/program/scrapers/orionoid.py @@ -134,8 +134,8 @@ def api_scrape(self, item): for stream, parsed_data in zip(response.data.data.streams, parsed_data_list) if parsed_data["fetch"] } - for parsed_data in parsed_data_list: - logger.debug("Orionoid Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) + # for parsed_data in parsed_data_list: + # logger.debug("Orionoid Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) if data: item.parsed_data.extend(parsed_data_list) item.parsed_data.append({self.key: True}) diff --git a/backend/program/scrapers/torrentio.py b/backend/program/scrapers/torrentio.py index f0414195..a8bda5d4 100644 --- a/backend/program/scrapers/torrentio.py +++ b/backend/program/scrapers/torrentio.py @@ -21,7 +21,7 @@ def __init__(self, _): self.key = "torrentio" self.settings = TorrentioConfig(**settings_manager.get(f"scraping.{self.key}")) self.minute_limiter = RateLimiter(max_calls=60, period=60, raise_on_limit=True) - self.second_limiter = RateLimiter(max_calls=1, period=5) + self.second_limiter = RateLimiter(max_calls=1, period=7) self.initialized = self.validate_settings() if not self.initialized: return @@ -97,8 +97,8 @@ def api_scrape(self, item): for stream, parsed_data in zip(response.data.streams, parsed_data_list) if parsed_data.get("fetch", False) and parsed_data.get("string", False) } - for parsed_data in parsed_data_list: - logger.debug("Torrentio Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) + # for parsed_data in parsed_data_list: + # logger.debug("Torrentio Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) if data: item.parsed_data.extend(parsed_data_list) item.parsed_data.append({self.key: True}) diff --git a/backend/utils/default_settings.json b/backend/utils/default_settings.json index 2f13f05f..7dbafe00 100644 --- a/backend/utils/default_settings.json +++ b/backend/utils/default_settings.json @@ -43,8 +43,8 @@ "after_5": 2, "after_10": 24, "torrentio": { - "enabled": true, - "filter": "sort=qualitysize%7Cqualityfilter=480p,scr,cam,unknown" + "enabled": false, + "filter": "sort=qualitysize%7Cqualityfilter=480p,scr,cam" }, "orionoid": { "enabled": false, From 8dc29e1ba32a5f8e135336925feb4ea0d5ba5a0b Mon Sep 17 00:00:00 2001 From: Spoked Date: Thu, 25 Jan 2024 00:07:29 -0500 Subject: [PATCH 11/65] Set all default settings to disabled by 
default for onboarding --- backend/utils/default_settings.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/utils/default_settings.json b/backend/utils/default_settings.json index 7dbafe00..d4ead597 100644 --- a/backend/utils/default_settings.json +++ b/backend/utils/default_settings.json @@ -33,7 +33,7 @@ "update_interval": 80 }, "overseerr": { - "enabled": true, + "enabled": false, "url": "http://localhost:5055", "api_key": "" } From ed2181f99e7662c6b77f0cfb7538ab1865d3c802 Mon Sep 17 00:00:00 2001 From: Spoked Date: Thu, 25 Jan 2024 00:11:01 -0500 Subject: [PATCH 12/65] add extra logging attr. for debugging large groups of data. --- backend/program/scrapers/jackett.py | 6 ++++-- backend/program/scrapers/orionoid.py | 6 ++++-- backend/program/scrapers/torrentio.py | 6 ++++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/backend/program/scrapers/jackett.py b/backend/program/scrapers/jackett.py index dca912bd..df142099 100644 --- a/backend/program/scrapers/jackett.py +++ b/backend/program/scrapers/jackett.py @@ -23,6 +23,7 @@ def __init__(self, _): self.initialized = self.validate_settings() if not self.initialized and not self.api_key: return + self.parse_logging = False self.minute_limiter = RateLimiter(max_calls=60, period=60, raise_on_limit=True) self.second_limiter = RateLimiter(max_calls=1, period=3) logger.info("Jackett initialized!") @@ -91,8 +92,9 @@ def api_scrape(self, item): if infohash_attr: infohash = infohash_attr.get("@value") data[infohash] = {"name": stream.get("title")} - # for parsed_data in parsed_data_list: - # logger.debug("Jackett Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) + if self.parse_logging: + for parsed_data in parsed_data_list: + logger.debug("Jackett Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) if data: item.parsed_data.extend(parsed_data_list) item.parsed_data.append({self.key: True}) diff --git a/backend/program/scrapers/orionoid.py b/backend/program/scrapers/orionoid.py index c89a21c2..8e33669f 100644 --- a/backend/program/scrapers/orionoid.py +++ b/backend/program/scrapers/orionoid.py @@ -28,6 +28,7 @@ def __init__(self, _): self.initialized = True else: return + self.parse_logging = False self.max_calls = 50 if not self.is_premium else 60 self.period = 86400 if not self.is_premium else 60 self.minute_limiter = RateLimiter(max_calls=self.max_calls, period=self.period, raise_on_limit=True) @@ -134,8 +135,9 @@ def api_scrape(self, item): for stream, parsed_data in zip(response.data.data.streams, parsed_data_list) if parsed_data["fetch"] } - # for parsed_data in parsed_data_list: - # logger.debug("Orionoid Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) + if self.parse_logging: + for parsed_data in parsed_data_list: + logger.debug("Orionoid Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) if data: item.parsed_data.extend(parsed_data_list) item.parsed_data.append({self.key: True}) diff --git a/backend/program/scrapers/torrentio.py b/backend/program/scrapers/torrentio.py index a8bda5d4..ccd8bcdc 100644 --- a/backend/program/scrapers/torrentio.py +++ b/backend/program/scrapers/torrentio.py @@ -25,6 +25,7 @@ def __init__(self, _): self.initialized = self.validate_settings() if not self.initialized: return + self.parse_logging = False logger.info("Torrentio initialized!") def validate_settings(self) -> bool: @@ -97,8 +98,9 @@ def api_scrape(self, item): for stream, parsed_data in 
zip(response.data.streams, parsed_data_list) if parsed_data.get("fetch", False) and parsed_data.get("string", False) } - # for parsed_data in parsed_data_list: - # logger.debug("Torrentio Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) + if self.parse_logging: + for parsed_data in parsed_data_list: + logger.debug("Torrentio Fetch: %s - Parsed item: %s", parsed_data["fetch"], parsed_data["string"]) if data: item.parsed_data.extend(parsed_data_list) item.parsed_data.append({self.key: True}) From 50b5464cb3c2ec938c85676f8d137b3b0ed111e0 Mon Sep 17 00:00:00 2001 From: Ayush Sehrawat Date: Thu, 25 Jan 2024 14:39:29 +0530 Subject: [PATCH 13/65] feat: started status page rewrite --- frontend/package.json | 1 + frontend/pnpm-lock.yaml | 25 +++++ frontend/src/app.html | 4 +- .../lib/components/status-media-card.svelte | 65 ------------- .../ui/carousel/carousel-content.svelte | 22 +++++ .../ui/carousel/carousel-item.svelte | 25 +++++ .../ui/carousel/carousel-next.svelte | 35 +++++++ .../ui/carousel/carousel-previous.svelte | 36 +++++++ .../components/ui/carousel/carousel.svelte | 94 +++++++++++++++++++ .../src/lib/components/ui/carousel/context.ts | 49 ++++++++++ .../src/lib/components/ui/carousel/index.ts | 5 + frontend/src/lib/forms/content-form.svelte | 10 +- frontend/src/lib/forms/scrapers-form.svelte | 2 +- frontend/src/routes/status/+page.svelte | 75 +++++++-------- 14 files changed, 337 insertions(+), 111 deletions(-) delete mode 100644 frontend/src/lib/components/status-media-card.svelte create mode 100644 frontend/src/lib/components/ui/carousel/carousel-content.svelte create mode 100644 frontend/src/lib/components/ui/carousel/carousel-item.svelte create mode 100644 frontend/src/lib/components/ui/carousel/carousel-next.svelte create mode 100644 frontend/src/lib/components/ui/carousel/carousel-previous.svelte create mode 100644 frontend/src/lib/components/ui/carousel/carousel.svelte create mode 100644 frontend/src/lib/components/ui/carousel/context.ts create mode 100644 frontend/src/lib/components/ui/carousel/index.ts diff --git a/frontend/package.json b/frontend/package.json index 300cf1b0..dd813cc3 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -42,6 +42,7 @@ "bits-ui": "^0.15.1", "clsx": "^2.0.0", "cmdk-sv": "^0.0.12", + "embla-carousel-svelte": "8.0.0-rc20", "formsnap": "^0.4.2", "lucide-svelte": "^0.314.0", "luxon": "^3.4.4", diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index 1ade38ad..38a5fb67 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -14,6 +14,9 @@ dependencies: cmdk-sv: specifier: ^0.0.12 version: 0.0.12(svelte@4.2.9) + embla-carousel-svelte: + specifier: 8.0.0-rc20 + version: 8.0.0-rc20(svelte@4.2.9) formsnap: specifier: ^0.4.2 version: 0.4.2(svelte@4.2.9)(sveltekit-superforms@1.13.4)(zod@3.22.4) @@ -1368,6 +1371,28 @@ packages: resolution: {integrity: sha512-M4+u22ZJGpk4RY7tne6W+APkZhnnhmAH48FNl8iEFK2lEgob+U5rUQsIqQhvAwCXYpfd3H20pHK/ENsCvwTbsA==} dev: true + /embla-carousel-reactive-utils@8.0.0-rc20(embla-carousel@8.0.0-rc20): + resolution: {integrity: sha512-fE7IeSS8HqwDnTDMP8eo0i4pcYQAemmJq53zCLXnp3Yj/p5+IpB1nC7aKQjd2ug1dGOSwwNRFaPI3shlAVVW/A==} + peerDependencies: + embla-carousel: 8.0.0-rc20 + dependencies: + embla-carousel: 8.0.0-rc20 + dev: false + + /embla-carousel-svelte@8.0.0-rc20(svelte@4.2.9): + resolution: {integrity: sha512-MpON0Pw1EcYMjJt1VCnDk+HXTQrNwyHTlhdQ/WFx5QrXOpqvSup1nXKiLYsjxKkwBv5vYU9e04akNdqEJQ3iIg==} + peerDependencies: + svelte: ^3.49.0 || ^4.0.0 + dependencies: + 
embla-carousel: 8.0.0-rc20 + embla-carousel-reactive-utils: 8.0.0-rc20(embla-carousel@8.0.0-rc20) + svelte: 4.2.9 + dev: false + + /embla-carousel@8.0.0-rc20: + resolution: {integrity: sha512-fhzhbIAcsjSpUsg5jWsg0+zVyJhY5x2SPXtuS4MPAWQWoVQpvkcbX9r0FvPBn6emTbgNFRtAcWczstJy2msdUw==} + dev: false + /emoji-regex@8.0.0: resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} diff --git a/frontend/src/app.html b/frontend/src/app.html index bd30208f..1d87e0e6 100644 --- a/frontend/src/app.html +++ b/frontend/src/app.html @@ -23,14 +23,14 @@ /> - import type { IcebergItem, StatusInterface } from '$lib/types'; - import { formatWords, formatDate } from '$lib/helpers'; - import { Badge } from '$lib/components/ui/badge'; - - export let plexDebridItem: IcebergItem; - export let itemState: StatusInterface; - - let fallback = 'https://via.placeholder.com/198x228.png?text=No+thumbnail'; - let poster = `https://images.metahub.space/poster/small/${plexDebridItem.imdb_id}/img`; - let banner = `https://images.metahub.space/background/medium/${plexDebridItem.imdb_id}/img`; - - // TODO: Use item ID to show more data - // TODO: Make use of type - - -
-	<img
-		alt="test"
-		src={poster}
-		on:error={() => (poster = fallback)}
-		class="w-[4.5rem] min-w-[4.5rem] h-24 rounded-md hover:scale-105 transition-all ease-in-out duration-300"
-	/>
-	{plexDebridItem.title}
-	Aired {formatDate(plexDebridItem.aired_at, 'short')}
-	{#each plexDebridItem.genres as genre}
-		{formatWords(genre)}
-	{/each}
-	Status
-	{itemState.text ?? formatWords(plexDebridItem.state)}
-	Requested
-	{formatDate(plexDebridItem.requested_at, 'long', true)}
-	Requested by
-	{plexDebridItem.requested_by}
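For reference, the artwork the deleted card displayed is keyed purely off the
item's IMDb id; a Python rendering of the same URL pattern (illustrative only,
mirroring the metahub and placeholder URLs the component hard-coded):

    def metahub_artwork(imdb_id: str) -> dict[str, str]:
        """Build the artwork URLs the status card used for a given IMDb id."""
        return {
            "poster": f"https://images.metahub.space/poster/small/{imdb_id}/img",
            "background": f"https://images.metahub.space/background/medium/{imdb_id}/img",
            # shown when the poster fails to load / no thumbnail exists
            "fallback": "https://via.placeholder.com/198x228.png?text=No+thumbnail",
        }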
diff --git a/frontend/src/lib/components/ui/carousel/carousel-content.svelte b/frontend/src/lib/components/ui/carousel/carousel-content.svelte new file mode 100644 index 00000000..21a69363 --- /dev/null +++ b/frontend/src/lib/components/ui/carousel/carousel-content.svelte @@ -0,0 +1,22 @@ + + +
+
+ +
+
diff --git a/frontend/src/lib/components/ui/carousel/carousel-item.svelte b/frontend/src/lib/components/ui/carousel/carousel-item.svelte new file mode 100644 index 00000000..aafd7f13 --- /dev/null +++ b/frontend/src/lib/components/ui/carousel/carousel-item.svelte @@ -0,0 +1,25 @@ + + +
+ +
diff --git a/frontend/src/lib/components/ui/carousel/carousel-next.svelte b/frontend/src/lib/components/ui/carousel/carousel-next.svelte new file mode 100644 index 00000000..94fb33b9 --- /dev/null +++ b/frontend/src/lib/components/ui/carousel/carousel-next.svelte @@ -0,0 +1,35 @@ + + + diff --git a/frontend/src/lib/components/ui/carousel/carousel-previous.svelte b/frontend/src/lib/components/ui/carousel/carousel-previous.svelte new file mode 100644 index 00000000..5a0645a2 --- /dev/null +++ b/frontend/src/lib/components/ui/carousel/carousel-previous.svelte @@ -0,0 +1,36 @@ + + + diff --git a/frontend/src/lib/components/ui/carousel/carousel.svelte b/frontend/src/lib/components/ui/carousel/carousel.svelte new file mode 100644 index 00000000..acf822d8 --- /dev/null +++ b/frontend/src/lib/components/ui/carousel/carousel.svelte @@ -0,0 +1,94 @@ + + +
+ +
diff --git a/frontend/src/lib/components/ui/carousel/context.ts b/frontend/src/lib/components/ui/carousel/context.ts new file mode 100644 index 00000000..0d00c1df --- /dev/null +++ b/frontend/src/lib/components/ui/carousel/context.ts @@ -0,0 +1,49 @@ +import type { EmblaCarouselSvelteType } from "embla-carousel-svelte"; +import type emblaCarouselSvelte from "embla-carousel-svelte"; +import { getContext, hasContext, setContext } from "svelte"; +import type { HTMLAttributes } from "svelte/elements"; +import type { Writable, Readable } from "svelte/store"; + +export type CarouselAPI = NonNullable< + NonNullable["on:emblaInit"] +> extends (evt: CustomEvent) => void + ? CarouselAPI + : never; + +type EmblaCarouselConfig = NonNullable[1]>; + +export type CarouselOptions = EmblaCarouselConfig["options"]; +export type CarouselPlugins = EmblaCarouselConfig["plugins"]; + +//// + +export type CarouselProps = { + opts?: CarouselOptions; + plugins?: CarouselPlugins; + api?: CarouselAPI; + orientation?: "horizontal" | "vertical"; +} & HTMLAttributes; + +const EMBLA_CAROUSEL_CONTEXT = Symbol("EMBLA_CAROUSEL_CONTEXT"); + +type EmblaContext = { + api: Writable; + orientation: Writable<"horizontal" | "vertical">; + scrollNext: () => void; + scrollPrev: () => void; + canScrollNext: Readable; + canScrollPrev: Readable; + handleKeyDown: (e: KeyboardEvent) => void; +}; + +export function setEmblaContex(config: EmblaContext): EmblaContext { + setContext(EMBLA_CAROUSEL_CONTEXT, config); + return config; +} + +export function getEmblaContext(name = "This component") { + if (!hasContext(EMBLA_CAROUSEL_CONTEXT)) { + throw new Error(`${name} must be used within a component`); + } + return getContext>(EMBLA_CAROUSEL_CONTEXT); +} diff --git a/frontend/src/lib/components/ui/carousel/index.ts b/frontend/src/lib/components/ui/carousel/index.ts new file mode 100644 index 00000000..78102bf7 --- /dev/null +++ b/frontend/src/lib/components/ui/carousel/index.ts @@ -0,0 +1,5 @@ +export { default as Root } from "./carousel.svelte"; +export { default as Content } from "./carousel-content.svelte"; +export { default as Item } from "./carousel-item.svelte"; +export { default as Previous } from "./carousel-previous.svelte"; +export { default as Next } from "./carousel-next.svelte"; diff --git a/frontend/src/lib/forms/content-form.svelte b/frontend/src/lib/forms/content-form.svelte index 04fccb28..b75968a8 100644 --- a/frontend/src/lib/forms/content-form.svelte +++ b/frontend/src/lib/forms/content-form.svelte @@ -125,7 +125,7 @@ debug={formDebug} >
-
+

Content Providers

@@ -276,7 +276,7 @@ {$mdblistListsErrors} {/if} -
+
@@ -378,7 +378,7 @@ {$listrrShowListsErrors} {/if} -
+
@@ -390,7 +390,6 @@ >
@@ -410,7 +409,7 @@
-
+
@@ -422,7 +421,6 @@ >
diff --git a/frontend/src/lib/forms/scrapers-form.svelte b/frontend/src/lib/forms/scrapers-form.svelte index 54e0c789..2e439ff5 100644 --- a/frontend/src/lib/forms/scrapers-form.svelte +++ b/frontend/src/lib/forms/scrapers-form.svelte @@ -72,7 +72,7 @@ {/if} -
+

Scrapers Enabled

diff --git a/frontend/src/routes/status/+page.svelte b/frontend/src/routes/status/+page.svelte index 7e5dddf5..5443ff1a 100644 --- a/frontend/src/routes/status/+page.svelte +++ b/frontend/src/routes/status/+page.svelte @@ -1,14 +1,13 @@ + + diff --git a/frontend/src/lib/forms/content-form.svelte b/frontend/src/lib/forms/content-form.svelte new file mode 100644 index 00000000..98bde86c --- /dev/null +++ b/frontend/src/lib/forms/content-form.svelte @@ -0,0 +1,480 @@ + + + +
+
+

+ Content Providers +

+
+ +
+ + Overseerr +
+
+ + +
+ + Mdblist +
+
+ + +
+ + Plex Watchlists +
+
+ + +
+ + Listrr +
+
+
+
+ + {#if $form.overseerr_enabled} +
+ + + + Overseerr URL + + + + {#if $errors.overseerr_url} + + {/if} + +
+ +
+ + + + Overseerr API Key + + 0 + })} + spellcheck="false" + /> + + {#if $errors.overseerr_api_key} + + {/if} + +
+ {/if} + + {#if $form.plex_watchlist_enabled} +
+ + + + Plex RSS URL + + + + {#if $errors.plex_watchlist_rss} + + {/if} + +
+ +
+ + + + Plex RSS Update Interval + + + + {#if $errors.plex_watchlist_update_interval} + + {/if} + +
+ {/if} + + + + + {#if $form.mdblist_enabled} +
+ + + + Mdblist API Key + + 0 + })} + spellcheck="false" + /> + + {#if $errors.mdblist_api_key} + + {/if} + +
+ +
+ + + + Mdblist Update Interval + + + + {#if $errors.mdblist_update_interval} + + {/if} + +
+ + {#if $mdblistListsErrors} + {$mdblistListsErrors} + {/if} + +
+ +
{ + addToList(event, 'mdblist'); + }} + class="w-full flex flex-col gap-4 items-start" + > + +
+ {#each $mdblistListsValues.filter((list) => list !== '') as list (list)} + + {/each} +
+
+
+ {/if} + + + + + + {#if $form.listrr_enabled} +
+ + + + Listrr API Key + + 0 + })} + spellcheck="false" + /> + + {#if $errors.listrr_api_key} + + {/if} + +
+ +
+ + + + Listrr Update Interval + + + + {#if $errors.listrr_update_interval} + + {/if} + +
+ + {#if $listrrMovieListsErrors} + {$listrrMovieListsErrors} + {/if} + {#if $listrrShowListsErrors} + {$listrrShowListsErrors} + {/if} + +
+ +
{ + addToList(event, 'listrr_movie'); + }} + class="w-full flex flex-col gap-4 items-start" + > + +
+ {#each $listrrMovieListsValues.filter((list) => list !== '') as list (list)} + + {/each} +
+
+
+ +
+ +
{ + addToList(event, 'listrr_show'); + }} + class="w-full flex flex-col gap-4 items-start" + > + +
+ {#each $listrrShowListsValues.filter((list) => list !== '') as list (list)} + + {/each} +
+
+
+ {/if} + + +
+ +
+
+
diff --git a/frontend/src/lib/forms/general-form.svelte b/frontend/src/lib/forms/general-form.svelte new file mode 100644 index 00000000..0ae31bbf --- /dev/null +++ b/frontend/src/lib/forms/general-form.svelte @@ -0,0 +1,113 @@ + + + +
+ + + + Debug * + + + + {#if $errors.debug} + + {/if} + + + + + + Log * + + + + {#if $errors.log} + + {/if} + + + + + + Host Path + + + + {#if $errors.host_path} + + {/if} + + + + + + Container Path + + + + {#if $errors.container_path} + + {/if} + + + + + + Real Debrid API Key + + 0 + })} + spellcheck="false" + /> + + {#if $errors.realdebrid_api_key} + + {/if} + + + +
+ +
+
+
diff --git a/frontend/src/lib/forms/helpers.ts b/frontend/src/lib/forms/helpers.ts new file mode 100644 index 00000000..05d165a0 --- /dev/null +++ b/frontend/src/lib/forms/helpers.ts @@ -0,0 +1,167 @@ +import { type SuperValidated } from 'sveltekit-superforms'; +import type { + GeneralSettingsSchema, + ContentSettingsSchema, + MediaServerSettingsSchema, + ScrapersSettingsSchema +} from '$lib/schemas/setting'; + +// General Settings ----------------------------------------------------------------------------------- +export const generalSettingsToGet: string[] = ['debug', 'log', 'symlink', 'real_debrid']; + +export function generalSettingsToPass(data: any) { + return { + debug: data.data.debug, + log: data.data.log, + host_path: data.data.symlink.host_path, + container_path: data.data.symlink.container_path, + realdebrid_api_key: data.data.real_debrid.api_key + } +} + +export function generalSettingsToSet(form: SuperValidated) { + return [ + { + key: 'debug', + value: form.data.debug + }, + { + key: 'log', + value: form.data.log + }, + { + key: 'symlink', + value: { + host_path: form.data.host_path, + container_path: form.data.container_path + } + }, + { + key: 'real_debrid', + value: { + api_key: form.data.realdebrid_api_key + } + } + ]; +} + +// Content Settings ----------------------------------------------------------------------------------- +export const contentSettingsToGet: string[] = ['content']; + +export function contentSettingsToPass(data:any) { + return { + overseerr_enabled: data.data.content.overseerr.enabled, + overseerr_url: data.data.content.overseerr?.url || '', + overseerr_api_key: data.data.content.overseerr?.api_key || '', + mdblist_enabled: data.data.content.mdblist.enabled, + mdblist_api_key: data.data.content.mdblist?.api_key || '', + mdblist_update_interval: data.data.content.mdblist?.update_interval || 80, + mdblist_lists: data.data.content.mdblist?.lists || [''], + plex_watchlist_enabled: data.data.content.plex_watchlist.enabled, + plex_watchlist_rss: data.data.content.plex_watchlist?.rss || '', + plex_watchlist_update_interval: + data.data.content.plex_watchlist?.update_interval || 80, + listrr_enabled: data.data.content.listrr.enabled, + listrr_api_key: data.data.content.listrr?.api_key || '', + listrr_update_interval: data.data.content.listrr?.update_interval || 80, + listrr_movie_lists: data.data.content.listrr?.movie_lists || [''], + listrr_show_lists: data.data.content.listrr?.show_lists || [''] + } +} + +export function contentSettingsToSet(form: SuperValidated) { + return [ + { + key: 'content', + value: { + overseerr: { + enabled: form.data.overseerr_enabled, + url: form.data.overseerr_url, + api_key: form.data.overseerr_api_key + }, + mdblist: { + enabled: form.data.mdblist_enabled, + api_key: form.data.mdblist_api_key, + update_interval: form.data.mdblist_update_interval, + lists: form.data.mdblist_lists + }, + plex_watchlist: { + enabled: form.data.plex_watchlist_enabled, + rss: form.data.plex_watchlist_rss, + update_interval: form.data.plex_watchlist_update_interval + }, + listrr: { + enabled: form.data.listrr_enabled, + api_key: form.data.listrr_api_key, + update_interval: form.data.listrr_update_interval, + movie_lists: form.data.listrr_movie_lists, + show_lists: form.data.listrr_show_lists + } + } + } + ]; +} + +// Media Server Settings ----------------------------------------------------------------------------------- +export const mediaServerSettingsToGet: string[] = ['plex']; + +export function mediaServerSettingsToPass(data: any) { + return { + 
plex_token: data.data.plex.token, + plex_url: data.data.plex.url + } +} + +export function mediaServerSettingsToSet(form: SuperValidated) { + return [ + { + key: 'plex', + value: { + token: form.data.plex_token, + url: form.data.plex_url + } + } + ]; +} + +// Scrapers Settings ----------------------------------------------------------------------------------- +export const scrapersSettingsToGet: string[] = ['scraping']; + +export function scrapersSettingsToPass(data: any) { + return { + after_2: data.data.scraping.after_2, + after_5: data.data.scraping.after_5, + after_10: data.data.scraping.after_10, + torrentio_enabled: data.data.scraping.torrentio.enabled, + orionoid_enabled: data.data.scraping.orionoid.enabled, + jackett_enabled: data.data.scraping.jackett.enabled, + torrentio_filter: data.data.scraping.torrentio?.filter || '', + orionoid_api_key: data.data.scraping.orionoid?.api_key || '', + jackett_url: data.data.scraping.jackett?.url || '' + } +} + +export function scrapersSettingsToSet(form: SuperValidated) { + return [ + { + key: 'scraping', + value: { + after_2: form.data.after_2, + after_5: form.data.after_5, + after_10: form.data.after_10, + torrentio: { + enabled: form.data.torrentio_enabled, + filter: form.data.torrentio_filter + }, + orionoid: { + enabled: form.data.orionoid_enabled, + api_key: form.data.orionoid_api_key + }, + jackett: { + enabled: form.data.jackett_enabled, + url: form.data.jackett_url + } + } + } + ]; +} diff --git a/frontend/src/lib/forms/media-server-form.svelte b/frontend/src/lib/forms/media-server-form.svelte new file mode 100644 index 00000000..1d200bbf --- /dev/null +++ b/frontend/src/lib/forms/media-server-form.svelte @@ -0,0 +1,169 @@ + + + +
+ + + + Plex URL + + + + {#if $errors.plex_url} + + {/if} + + + + + + Plex Token + + + + + + {#if $errors.plex_token} + + {/if} + + + +
+ +
+
+
diff --git a/frontend/src/lib/forms/scrapers-form.svelte b/frontend/src/lib/forms/scrapers-form.svelte new file mode 100644 index 00000000..05e9f88f --- /dev/null +++ b/frontend/src/lib/forms/scrapers-form.svelte @@ -0,0 +1,172 @@ + + + +
+ + + + Retry After 2 Times (hr) + + + + {#if $errors.after_2} + + {/if} + + + + + + Retry After 5 Times (hr) + + + + {#if $errors.after_5} + + {/if} + + + + + + Retry After 10 Times (hr) + + + + {#if $errors.after_10} + + {/if} + + +
+

+ Scrapers Enabled +

+
+ +
+ + Torrentio +
+
+ + +
+ + Orionoid +
+
+ + +
+ + Jackett +
+
+
+
+ + {#if $form.torrentio_enabled} +
+ + + + Torrentio Filter + + + + {#if $errors.torrentio_filter} + + {/if} + +
+ {/if} + + {#if $form.orionoid_enabled} +
+ + + + Orionoid API Key + + 0 + })} + spellcheck="false" + /> + + {#if $errors.orionoid_api_key} + + {/if} + +
+ {/if} + + {#if $form.jackett_enabled} +
+ + + + Jackett URL + + + + {#if $errors.jackett_url} + + {/if} + +
+ {/if} + + +
+ +
+
+
diff --git a/frontend/src/lib/helpers.ts b/frontend/src/lib/helpers.ts index ba460fbd..a6845bac 100644 --- a/frontend/src/lib/helpers.ts +++ b/frontend/src/lib/helpers.ts @@ -1,5 +1,5 @@ import { DateTime } from 'luxon'; -import type { PlexDebridItem } from '$lib/types'; +import type { IcebergItem } from '$lib/types'; // only works with real-debrid dates because of CET format provided by RD export function formatRDDate(inputDate: string, format: string = 'long'): string { @@ -28,7 +28,7 @@ export function formatDate( format: string = 'long', relative: boolean = false ): string { - let date = DateTime.fromISO(inputDate, { zone: 'utc' }); + let date = DateTime.fromISO(inputDate); date = date.setZone('local'); let formattedDate; @@ -57,8 +57,8 @@ export function formatWords(words: string) { .join(' '); } -export function convertPlexDebridItemsToObject(items: PlexDebridItem[]) { - const result: { [key: string]: PlexDebridItem[] } = {}; +export function convertPlexDebridItemsToObject(items: IcebergItem[]) { + const result: { [key: string]: IcebergItem[] } = {}; for (const item of items) { if (!result[item.state]) { diff --git a/frontend/src/lib/schemas/setting.ts b/frontend/src/lib/schemas/setting.ts index ede90f53..b2309924 100644 --- a/frontend/src/lib/schemas/setting.ts +++ b/frontend/src/lib/schemas/setting.ts @@ -1,6 +1,8 @@ import { z } from 'zod'; export const generalSettingsSchema = z.object({ + debug: z.boolean().default(true), + log: z.boolean().default(true), host_path: z.string().min(1), container_path: z.string().min(1), realdebrid_api_key: z.string().min(1), @@ -33,7 +35,12 @@ export const contentSettingsSchema = z.object({ mdblist_lists: z.string().array().optional().default(['']), plex_watchlist_enabled: z.boolean().default(false), plex_watchlist_rss: z.union([z.string().url(), z.string().optional()]).optional().default(''), - plex_watchlist_update_interval: z.number().nonnegative().int().optional().default(80) + plex_watchlist_update_interval: z.number().nonnegative().int().optional().default(80), + listrr_enabled: z.boolean().default(false), + listrr_api_key: z.string().optional().default(''), + listrr_update_interval: z.number().nonnegative().int().optional().default(80), + listrr_movie_lists: z.string().array().optional().default(['']), + listrr_show_lists: z.string().array().optional().default(['']) }); export type GeneralSettingsSchema = typeof generalSettingsSchema; diff --git a/frontend/src/lib/types.ts b/frontend/src/lib/types.ts index c82b4904..d1287722 100644 --- a/frontend/src/lib/types.ts +++ b/frontend/src/lib/types.ts @@ -15,18 +15,22 @@ export interface UserResponse { expiration: string; } -export interface PlexDebridItem { +export interface IcebergItem { item_id: number; title: string; type: string; - imdb_id: string; + imdb_id: string | null; + tvdb_id: number | null; + tmdb_id: number | null; state: string; imdb_link: string; aired_at: string; genres: string[]; - guid: string; + guid: string | null; requested_at: string; requested_by: string; + scraped_at: string | null; + scraped_times: number | null; } export interface StatusInterface { @@ -38,4 +42,4 @@ export interface StatusInterface { export interface StatusInfo { [key: string]: StatusInterface; -} \ No newline at end of file +} diff --git a/frontend/src/routes/+layout.svelte b/frontend/src/routes/+layout.svelte index f9b8afa8..f672084e 100644 --- a/frontend/src/routes/+layout.svelte +++ b/frontend/src/routes/+layout.svelte @@ -1,6 +1,6 @@ + + + Onboarding | Iceberg + + + diff --git 
a/frontend/src/routes/onboarding/+page.svelte b/frontend/src/routes/onboarding/+page.svelte new file mode 100644 index 00000000..c614532b --- /dev/null +++ b/frontend/src/routes/onboarding/+page.svelte @@ -0,0 +1,60 @@ + + +
+
+
+ +
+

Welcome to Iceberg!

+

+ Before you can start using Iceberg, you need to configure some services first. +

+ +
+
diff --git a/frontend/src/routes/onboarding/1/+page.server.ts b/frontend/src/routes/onboarding/1/+page.server.ts new file mode 100644 index 00000000..1edbc2c3 --- /dev/null +++ b/frontend/src/routes/onboarding/1/+page.server.ts @@ -0,0 +1,30 @@ +import type { PageServerLoad, Actions } from './$types'; +import { fail, error } from '@sveltejs/kit'; +import { message, superValidate } from 'sveltekit-superforms/server'; +import { generalSettingsSchema } from '$lib/schemas/setting'; +import { saveSettings } from '$lib/helpers'; +import { + generalSettingsToGet, + generalSettingsToPass, + generalSettingsToSet +} from '$lib/forms/helpers'; + +export const load: PageServerLoad = async ({ fetch }) => { + async function getPartialSettings() { + try { + const results = await fetch( + `http://127.0.0.1:8080/settings/get/${generalSettingsToGet.join(',')}` + ); + return await results.json(); + } catch (e) { + console.error(e); + error(503, 'Unable to fetch settings data. API is down.'); + } + } + + let data: any = await getPartialSettings(); + let toPassToSchema = generalSettingsToPass(data); + + const form = await superValidate(toPassToSchema, generalSettingsSchema, { errors: false }); + return { form }; +}; diff --git a/frontend/src/routes/onboarding/1/+page.svelte b/frontend/src/routes/onboarding/1/+page.svelte new file mode 100644 index 00000000..26d76247 --- /dev/null +++ b/frontend/src/routes/onboarding/1/+page.svelte @@ -0,0 +1,21 @@ + + +
+
+

Step 1/4

+

Let's get started by configuring your general settings.

+ Fields marked with * require restart of backend services. +
+
+ + +
+
diff --git a/frontend/src/routes/onboarding/2/+page.server.ts b/frontend/src/routes/onboarding/2/+page.server.ts new file mode 100644 index 00000000..46571291 --- /dev/null +++ b/frontend/src/routes/onboarding/2/+page.server.ts @@ -0,0 +1,30 @@ +import type { PageServerLoad, Actions } from './$types'; +import { fail, error } from '@sveltejs/kit'; +import { message, superValidate } from 'sveltekit-superforms/server'; +import { mediaServerSettingsSchema } from '$lib/schemas/setting'; +import { saveSettings } from '$lib/helpers'; +import { + mediaServerSettingsToGet, + mediaServerSettingsToPass, + mediaServerSettingsToSet +} from '$lib/forms/helpers'; + +export const load: PageServerLoad = async ({ fetch }) => { + async function getPartialSettings() { + try { + const results = await fetch( + `http://127.0.0.1:8080/settings/get/${mediaServerSettingsToGet.join(',')}` + ); + return await results.json(); + } catch (e) { + console.error(e); + error(503, 'Unable to fetch settings data. API is down.'); + } + } + + let data: any = await getPartialSettings(); + let toPassToSchema = mediaServerSettingsToPass(data); + + const form = await superValidate(toPassToSchema, mediaServerSettingsSchema, { errors: false }); + return { form }; +}; diff --git a/frontend/src/routes/onboarding/2/+page.svelte b/frontend/src/routes/onboarding/2/+page.svelte new file mode 100644 index 00000000..7a86cb12 --- /dev/null +++ b/frontend/src/routes/onboarding/2/+page.svelte @@ -0,0 +1,21 @@ + + +
+
+

Step 2/4

+

Time to configure your media server.

+ Fields marked with * require restart of backend services. +
+
+ + +
+
diff --git a/frontend/src/routes/onboarding/3/+page.server.ts b/frontend/src/routes/onboarding/3/+page.server.ts new file mode 100644 index 00000000..b46ca034 --- /dev/null +++ b/frontend/src/routes/onboarding/3/+page.server.ts @@ -0,0 +1,30 @@ +import type { PageServerLoad, Actions } from './$types'; +import { fail, error } from '@sveltejs/kit'; +import { message, superValidate } from 'sveltekit-superforms/server'; +import { contentSettingsSchema } from '$lib/schemas/setting'; +import { saveSettings } from '$lib/helpers'; +import { + contentSettingsToGet, + contentSettingsToPass, + contentSettingsToSet +} from '$lib/forms/helpers'; + +export const load: PageServerLoad = async ({ fetch }) => { + async function getPartialSettings() { + try { + const results = await fetch( + `http://127.0.0.1:8080/settings/get/${contentSettingsToGet.join(',')}` + ); + return await results.json(); + } catch (e) { + console.error(e); + error(503, 'Unable to fetch settings data. API is down.'); + } + } + + let data: any = await getPartialSettings(); + let toPassToSchema = contentSettingsToPass(data); + + const form = await superValidate(toPassToSchema, contentSettingsSchema, { errors: false }); + return { form }; +}; diff --git a/frontend/src/routes/onboarding/3/+page.svelte b/frontend/src/routes/onboarding/3/+page.svelte new file mode 100644 index 00000000..96d3c405 --- /dev/null +++ b/frontend/src/routes/onboarding/3/+page.svelte @@ -0,0 +1,21 @@ + + +
+
+

Step 3/4

+

Services to request content from.

+ Fields marked with * require restart of backend services. +
+
+ + +
+
diff --git a/frontend/src/routes/onboarding/4/+page.server.ts b/frontend/src/routes/onboarding/4/+page.server.ts new file mode 100644 index 00000000..0675662d --- /dev/null +++ b/frontend/src/routes/onboarding/4/+page.server.ts @@ -0,0 +1,30 @@ +import type { PageServerLoad, Actions } from './$types'; +import { fail, error } from '@sveltejs/kit'; +import { message, superValidate } from 'sveltekit-superforms/server'; +import { scrapersSettingsSchema } from '$lib/schemas/setting'; +import { saveSettings } from '$lib/helpers'; +import { + scrapersSettingsToGet, + scrapersSettingsToPass, + scrapersSettingsToSet +} from '$lib/forms/helpers'; + +export const load: PageServerLoad = async ({ fetch }) => { + async function getPartialSettings() { + try { + const results = await fetch( + `http://127.0.0.1:8080/settings/get/${scrapersSettingsToGet.join(',')}` + ); + return await results.json(); + } catch (e) { + console.error(e); + error(503, 'Unable to fetch settings data. API is down.'); + } + } + + let data: any = await getPartialSettings(); + let toPassToSchema = scrapersSettingsToPass(data); + + const form = await superValidate(toPassToSchema, scrapersSettingsSchema, { errors: false }); + return { form }; +}; diff --git a/frontend/src/routes/onboarding/4/+page.svelte b/frontend/src/routes/onboarding/4/+page.svelte new file mode 100644 index 00000000..ee1ee5b1 --- /dev/null +++ b/frontend/src/routes/onboarding/4/+page.svelte @@ -0,0 +1,21 @@ + + +
+
+

Step 4/4

+

Configure where to scrape content from.

+ Fields marked with * require restart of backend services. +
+
+ + +
+
diff --git a/frontend/src/routes/settings/content/+page.server.ts b/frontend/src/routes/settings/content/+page.server.ts index 93d59729..c8dcd9ec 100644 --- a/frontend/src/routes/settings/content/+page.server.ts +++ b/frontend/src/routes/settings/content/+page.server.ts @@ -1,14 +1,20 @@ import type { PageServerLoad, Actions } from './$types'; -import { fail, error } from '@sveltejs/kit'; +import { fail, error, redirect } from '@sveltejs/kit'; import { message, superValidate } from 'sveltekit-superforms/server'; import { contentSettingsSchema } from '$lib/schemas/setting'; import { saveSettings } from '$lib/helpers'; +import { + contentSettingsToGet, + contentSettingsToPass, + contentSettingsToSet +} from '$lib/forms/helpers'; export const load: PageServerLoad = async ({ fetch }) => { async function getPartialSettings() { try { - const toGet = ['content']; - const results = await fetch(`http://127.0.0.1:8080/settings/get/${toGet.join(',')}`); + const results = await fetch( + `http://127.0.0.1:8080/settings/get/${contentSettingsToGet.join(',')}` + ); return await results.json(); } catch (e) { console.error(e); @@ -16,23 +22,10 @@ export const load: PageServerLoad = async ({ fetch }) => { } } - let toPassToSchema: any = await getPartialSettings(); - toPassToSchema = { - overseerr_enabled: toPassToSchema.data.content.overseerr.enabled, - overseerr_url: toPassToSchema.data.content.overseerr?.url || '', - overseerr_api_key: toPassToSchema.data.content.overseerr?.api_key || '', - mdblist_enabled: toPassToSchema.data.content.mdblist.enabled, - mdblist_api_key: toPassToSchema.data.content.mdblist?.api_key || '', - mdblist_update_interval: toPassToSchema.data.content.mdblist?.update_interval || 80, - mdblist_lists: toPassToSchema.data.content.mdblist?.lists || [''], - plex_watchlist_enabled: toPassToSchema.data.content.plex_watchlist.enabled, - plex_watchlist_rss: toPassToSchema.data.content.plex_watchlist?.rss || '', - plex_watchlist_update_interval: - toPassToSchema.data.content.plex_watchlist?.update_interval || 80 - }; + let data: any = await getPartialSettings(); + const toPassToSchema = contentSettingsToPass(data); const form = await superValidate(toPassToSchema, contentSettingsSchema); - return { form }; }; @@ -44,29 +37,7 @@ export const actions: Actions = { form }); } - const toSet = [ - { - key: 'content', - value: { - overseerr: { - enabled: form.data.overseerr_enabled, - url: form.data.overseerr_url, - api_key: form.data.overseerr_api_key - }, - mdblist: { - enabled: form.data.mdblist_enabled, - api_key: form.data.mdblist_api_key, - update_interval: form.data.mdblist_update_interval, - lists: form.data.mdblist_lists - }, - plex_watchlist: { - enabled: form.data.plex_watchlist_enabled, - rss: form.data.plex_watchlist_rss, - update_interval: form.data.plex_watchlist_update_interval - } - } - } - ]; + const toSet = contentSettingsToSet(form); try { const data = await saveSettings(event.fetch, toSet); @@ -77,6 +48,10 @@ export const actions: Actions = { }); } + if (event.url.searchParams.get('onboarding') === 'true') { + redirect(302, '/onboarding/4'); + } + return message(form, 'Settings saved!'); } }; diff --git a/frontend/src/routes/settings/content/+page.svelte b/frontend/src/routes/settings/content/+page.svelte index b31e3193..a01d9345 100644 --- a/frontend/src/routes/settings/content/+page.svelte +++ b/frontend/src/routes/settings/content/+page.svelte @@ -1,254 +1,13 @@

Content Settings

Configure content providers for Iceberg.

- -
-
-

- Content Providers -

-
- -
- - Overseerr -
-
- - -
- - Mdblist -
-
- - -
- - Plex Watchlists -
-
-
-
- - {#if $form.overseerr_enabled} -
- - - - Overseerr URL - - - - - -
- -
- - - - Overseerr API Key - - 0 - })} - spellcheck="false" - /> - - - -
- {/if} - - {#if $form.plex_watchlist_enabled} -
- - - - Plex RSS URL - - - - - -
- -
- - - - Plex RSS Update Interval - - - - - -
- {/if} - - - - - {#if $form.mdblist_enabled} -
- - - - Mdblist API Key - - 0 - })} - spellcheck="false" - /> - - - -
- -
- - - - Mdblist Update Interval - - - - - -
- - {#if $mdblistListsErrors} - {$mdblistListsErrors} - {/if} - -
- -
- -
- {#each $mdblistListsValues.filter((list) => list !== '') as list (list)} - - {/each} -
-
-
- {/if} - - -
- -
-
-
+
diff --git a/frontend/src/routes/settings/general/+page.server.ts b/frontend/src/routes/settings/general/+page.server.ts index 62d9912b..4ea82c01 100644 --- a/frontend/src/routes/settings/general/+page.server.ts +++ b/frontend/src/routes/settings/general/+page.server.ts @@ -1,14 +1,14 @@ import type { PageServerLoad, Actions } from './$types'; -import { fail, error } from '@sveltejs/kit'; +import { fail, error, redirect } from '@sveltejs/kit'; import { message, superValidate } from 'sveltekit-superforms/server'; import { generalSettingsSchema } from '$lib/schemas/setting'; import { saveSettings } from '$lib/helpers'; +import { generalSettingsToGet, generalSettingsToPass, generalSettingsToSet } from '$lib/forms/helpers'; export const load: PageServerLoad = async ({ fetch }) => { async function getPartialSettings() { try { - const toGet = ['symlink', 'real_debrid']; - const results = await fetch(`http://127.0.0.1:8080/settings/get/${toGet.join(',')}`); + const results = await fetch(`http://127.0.0.1:8080/settings/get/${generalSettingsToGet.join(',')}`); return await results.json(); } catch (e) { console.error(e); @@ -16,41 +16,24 @@ export const load: PageServerLoad = async ({ fetch }) => { } } - let toPassToSchema: any = await getPartialSettings(); - toPassToSchema = { - host_path: toPassToSchema.data.symlink.host_path, - container_path: toPassToSchema.data.symlink.container_path, - realdebrid_api_key: toPassToSchema.data.real_debrid.api_key - }; + let data: any = await getPartialSettings(); + let toPassToSchema = generalSettingsToPass(data); const form = await superValidate(toPassToSchema, generalSettingsSchema); - return { form }; }; export const actions: Actions = { default: async (event) => { const form = await superValidate(event, generalSettingsSchema); + console.log(event.url.searchParams) + if (!form.valid) { return fail(400, { form }); } - const toSet = [ - { - key: 'symlink', - value: { - host_path: form.data.host_path, - container_path: form.data.container_path - } - }, - { - key: 'real_debrid', - value: { - api_key: form.data.realdebrid_api_key - } - } - ]; + const toSet = generalSettingsToSet(form); try { const data = await saveSettings(event.fetch, toSet); @@ -61,6 +44,10 @@ export const actions: Actions = { }); } + if (event.url.searchParams.get('onboarding') === 'true') { + redirect(302, '/onboarding/2'); + } + return message(form, 'Settings saved!'); } }; diff --git a/frontend/src/routes/settings/general/+page.svelte b/frontend/src/routes/settings/general/+page.svelte index 2a6c6088..202fd626 100644 --- a/frontend/src/routes/settings/general/+page.svelte +++ b/frontend/src/routes/settings/general/+page.svelte @@ -1,25 +1,9 @@
@@ -27,59 +11,21 @@

Configure global and default settings for Iceberg.

+ + + What is the difference between debug and log? + DEBUG is the log level; if you turn it off you will only see INFO in the logs. LOG, on + the other hand, means logging to a file. + + - -
- - - - Host Path - - - - - - - - - - Container Path - - - - - - - - - - Real Debrid API Key - - 0 - })} - spellcheck="false" - /> - - - +

+ * These settings require a restart to take effect. +

- -
- -
-
-
+
diff --git a/frontend/src/routes/settings/mediaserver/+page.server.ts b/frontend/src/routes/settings/mediaserver/+page.server.ts index e4a33d31..0b2b4f54 100644 --- a/frontend/src/routes/settings/mediaserver/+page.server.ts +++ b/frontend/src/routes/settings/mediaserver/+page.server.ts @@ -1,14 +1,20 @@ import type { PageServerLoad, Actions } from './$types'; -import { fail, error } from '@sveltejs/kit'; +import { fail, error, redirect } from '@sveltejs/kit'; import { message, superValidate } from 'sveltekit-superforms/server'; import { mediaServerSettingsSchema } from '$lib/schemas/setting'; import { saveSettings } from '$lib/helpers'; +import { + mediaServerSettingsToGet, + mediaServerSettingsToPass, + mediaServerSettingsToSet +} from '$lib/forms/helpers'; export const load: PageServerLoad = async ({ fetch }) => { async function getPartialSettings() { try { - const toGet = ['plex']; - const results = await fetch(`http://127.0.0.1:8080/settings/get/${toGet.join(',')}`); + const results = await fetch( + `http://127.0.0.1:8080/settings/get/${mediaServerSettingsToGet.join(',')}` + ); return await results.json(); } catch (e) { console.error(e); @@ -16,14 +22,10 @@ export const load: PageServerLoad = async ({ fetch }) => { } } - let toPassToSchema: any = await getPartialSettings(); - toPassToSchema = { - plex_token: toPassToSchema.data.plex.token, - plex_url: toPassToSchema.data.plex.url - }; + let data: any = await getPartialSettings(); + let toPassToSchema = mediaServerSettingsToPass(data); const form = await superValidate(toPassToSchema, mediaServerSettingsSchema); - return { form }; }; @@ -35,15 +37,7 @@ export const actions: Actions = { form }); } - const toSet = [ - { - key: 'plex', - value: { - token: form.data.plex_token, - url: form.data.plex_url - } - } - ]; + const toSet = mediaServerSettingsToSet(form); try { const data = await saveSettings(event.fetch, toSet); @@ -54,6 +48,10 @@ export const actions: Actions = { }); } + if (event.url.searchParams.get('onboarding') === 'true') { + redirect(302, '/onboarding/3'); + } + return message(form, 'Settings saved!'); } }; diff --git a/frontend/src/routes/settings/mediaserver/+page.svelte b/frontend/src/routes/settings/mediaserver/+page.svelte index 6d0bc8ee..471ccd56 100644 --- a/frontend/src/routes/settings/mediaserver/+page.svelte +++ b/frontend/src/routes/settings/mediaserver/+page.svelte @@ -1,24 +1,7 @@
@@ -27,52 +10,5 @@ Configure media server settings for Iceberg.

- -
- - - - Plex URL - - - - - - - - - - Plex Token - - 0 - })} - spellcheck="false" - /> - - - - - -
- -
-
-
+
diff --git a/frontend/src/routes/settings/scrapers/+page.server.ts b/frontend/src/routes/settings/scrapers/+page.server.ts
index 834d9892..dcb3558b 100644
--- a/frontend/src/routes/settings/scrapers/+page.server.ts
+++ b/frontend/src/routes/settings/scrapers/+page.server.ts
@@ -1,14 +1,14 @@
 import type { PageServerLoad, Actions } from './$types';
-import { fail, error } from '@sveltejs/kit';
+import { fail, error, redirect } from '@sveltejs/kit';
 import { message, superValidate } from 'sveltekit-superforms/server';
 import { scrapersSettingsSchema } from '$lib/schemas/setting';
 import { saveSettings } from '$lib/helpers';
+import { scrapersSettingsToGet, scrapersSettingsToPass, scrapersSettingsToSet } from '$lib/forms/helpers';
 
 export const load: PageServerLoad = async ({ fetch }) => {
     async function getPartialSettings() {
         try {
-            const toGet = ['scraping'];
-            const results = await fetch(`http://127.0.0.1:8080/settings/get/${toGet.join(',')}`);
+            const results = await fetch(`http://127.0.0.1:8080/settings/get/${scrapersSettingsToGet.join(',')}`);
             return await results.json();
         } catch (e) {
             console.error(e);
@@ -16,21 +16,10 @@ export const load: PageServerLoad = async ({ fetch }) => {
         }
     }
 
-    let toPassToSchema: any = await getPartialSettings();
-    toPassToSchema = {
-        after_2: toPassToSchema.data.scraping.after_2,
-        after_5: toPassToSchema.data.scraping.after_5,
-        after_10: toPassToSchema.data.scraping.after_10,
-        torrentio_enabled: toPassToSchema.data.scraping.torrentio.enabled,
-        orionoid_enabled: toPassToSchema.data.scraping.orionoid.enabled,
-        jackett_enabled: toPassToSchema.data.scraping.jackett.enabled,
-        torrentio_filter: toPassToSchema.data.scraping.torrentio?.filter || '',
-        orionoid_api_key: toPassToSchema.data.scraping.orionoid?.api_key || '',
-        jackett_url: toPassToSchema.data.scraping.jackett?.url || ''
-    };
+    let data: any = await getPartialSettings();
+    let toPassToSchema = scrapersSettingsToPass(data);
 
     const form = await superValidate(toPassToSchema, scrapersSettingsSchema);
-
     return { form };
 };
 
@@ -42,28 +31,7 @@ export const actions: Actions = {
                 form
             });
         }
-        const toSet = [
-            {
-                key: 'scraping',
-                value: {
-                    after_2: form.data.after_2,
-                    after_5: form.data.after_5,
-                    after_10: form.data.after_10,
-                    torrentio: {
-                        enabled: form.data.torrentio_enabled,
-                        filter: form.data.torrentio_filter
-                    },
-                    orionoid: {
-                        enabled: form.data.orionoid_enabled,
-                        api_key: form.data.orionoid_api_key
-                    },
-                    jackett: {
-                        enabled: form.data.jackett_enabled,
-                        url: form.data.jackett_url,
-                    }
-                }
-            }
-        ];
+        const toSet = scrapersSettingsToSet(form);
 
         try {
             const data = await saveSettings(event.fetch, toSet);
@@ -74,6 +42,10 @@ export const actions: Actions = {
             });
         }
 
+        if (event.url.searchParams.get('onboarding') === 'true') {
+            redirect(302, '/?onboarding=true');
+        }
+
         return message(form, 'Settings saved!');
     }
 };
diff --git a/frontend/src/routes/settings/scrapers/+page.svelte b/frontend/src/routes/settings/scrapers/+page.svelte
index f3577dd7..8fe21ff1 100644
--- a/frontend/src/routes/settings/scrapers/+page.svelte
+++ b/frontend/src/routes/settings/scrapers/+page.svelte
@@ -1,161 +1,13 @@
[hunk unrecoverable: the Svelte markup was stripped during extraction. The surviving labels — "Scraper Settings", "Configure scraper settings for Iceberg.", the "Retry After 2 Times (hr)" / "Retry After 5 Times (hr)" / "Retry After 10 Times (hr)" number fields, the "Scrapers Enabled" group with Torrentio / Orionoid / Jackett toggles, and the conditional "Torrentio Filter", "Orionoid API Key", and "Jackett URL" inputs gated on $form.torrentio_enabled / $form.orionoid_enabled / $form.jackett_enabled — show the page's inline form being deleted, with a single added line (markup also stripped) presumably rendering the shared scrapers form component instead.]
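Both settings actions now end with the same onboarding hook: after a successful save, a `?onboarding=true` query parameter moves the user to the next wizard step instead of leaving them on the page. A condensed sketch of that pattern, under the same helper assumptions as above (in SvelteKit 2, which this series upgrades to, `redirect()` throws internally, so no `throw` keyword is needed):

import { fail, redirect } from '@sveltejs/kit';
import { message, superValidate } from 'sveltekit-superforms/server';
import { scrapersSettingsSchema } from '$lib/schemas/setting';
import { scrapersSettingsToSet } from '$lib/forms/helpers';
import { saveSettings } from '$lib/helpers';
import type { Actions } from './$types';

export const actions: Actions = {
    default: async (event) => {
        const form = await superValidate(event.request, scrapersSettingsSchema);
        if (!form.valid) {
            return fail(400, { form });
        }

        // Persist the nested settings payload, as in the hunk above.
        await saveSettings(event.fetch, scrapersSettingsToSet(form));

        // Onboarding flow: chain to the next step. redirect() throws,
        // so nothing below this line runs when the parameter is set.
        if (event.url.searchParams.get('onboarding') === 'true') {
            redirect(302, '/?onboarding=true'); // the media-server page targets /onboarding/3
        }

        // Normal flow: stay on the settings page and show a success message.
        return message(form, 'Settings saved!');
    }
};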
diff --git a/frontend/svelte.config.js b/frontend/svelte.config.js
index 9c4355df..6ecdb6d7 100644
--- a/frontend/svelte.config.js
+++ b/frontend/svelte.config.js
@@ -1,16 +1,9 @@
 import adapter from '@sveltejs/adapter-node';
 import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
-
 /** @type {import('@sveltejs/kit').Config} */
 const config = {
-    // Consult https://kit.svelte.dev/docs/integrations#preprocessors
-    // for more information about preprocessors
-    preprocess: [vitePreprocess({})],
-
+    preprocess: vitePreprocess({}),
     kit: {
-        // adapter-auto only supports some environments, see https://kit.svelte.dev/docs/adapter-auto for a list.
-        // If your environment is not supported or you settled on a specific environment, switch out the adapter.
-        // See https://kit.svelte.dev/docs/adapters for more information about adapters.
         adapter: adapter(),
         alias: {
             $lib: './src/lib'

From 3d834c3590adab3f83dd0414c5f9ae1b1079328f Mon Sep 17 00:00:00 2001
From: Ayush Sehrawat <69469790+AyushSehrawat@users.noreply.github.com>
Date: Wed, 24 Jan 2024 13:56:01 +0530
Subject: [PATCH 23/65] feat: frontend improvements (#159)

* feat: added global debug for settings
* feat: added dev to formDebug so it's always true in development but false in production
* feat: added DEBUG & LOG to general settings
* deps: switched svelte-sonner to shadcn customized toast component
* refactor: renamed PlexDebridItem to IcebergItem and added changes made to /items
* fix: fixed the wrong relative date in status page
* docs: readme improvements
* docs: readme improvements
* docs: readme improvements
* chore(deps): bump lucide-svelte from 0.303.0 to 0.307.0 in /frontend (#124)
* refactor: componentized forms, soon will do same for fields too
* Parse rewrite (#128)
* Move parser to its own module
* Add ORIGIN to env vars
* Fix overseerr, watchlist, jackett validation.
* Added more refined logic to parser module.
* Set stage for testing
* Add methods for individual checks
* Update sort logic
* Update default settings
* Fix jackett. Begin to add title support for jackett.
---------
Co-authored-by: Spoked
Co-authored-by: Dreu LaVelle
* feat: onboarding on the way ;), major refactoring of form related code
* Simplified downloading logic and modified state machine
* fix typo in state machine and handle movie pathing correctly
* Remove useless method
* Temporary fix to test
* Remove uncached stream hashes from item to avoid loop, some blacklisting logic could also be good
* chore(deps-dev): bump @typescript-eslint/eslint-plugin in /frontend (#134)
* chore(deps): bump lucide-svelte from 0.307.0 to 0.309.0 in /frontend (#133)
* chore(deps-dev): bump @sveltejs/kit from 2.0.1 to 2.3.2 in /frontend (#132)
* chore(deps): bump bits-ui from 0.13.0 to 0.14.0 in /frontend (#130)
* chore(deps-dev): bump @sveltejs/adapter-node in /frontend (#138)
* feat: some more onboarding and form improvements
* Dev startup to disabling pickling
* feat: Listrr Support Added (#136)
* Start Listrr Feature
* feat: Listrr ready for review.
* small tweaks. rewrite coming later.
---------
Co-authored-by: Spoked
* Jackett rewrite (#139)
* Add TorBox scraper
* Add is_anime attribute to item
* Rework Jackett to Keyword Queries. Added categories. Removed Torbox
* Remove audio from parsing, it removed a lot of good hits
* fix movie scraping and modify response parsing logic to be more readable
* fix: remove torbox module
* remove audio from being parsed
* remove more audio from parser
* fix typo
* fix: tidy audio and networks
* small tweaks
---------
Co-authored-by: Spoked
Co-authored-by: Gaisberg
* Avoid [None] if empty content service
* fix: handle bad quality manually in parser (#145)
Co-authored-by: Spoked
* deps: updated deps due to security updates
* feat: added more onboarding steps, some bugs also introduced
* chore(deps-dev): bump @sveltejs/kit from 2.3.2 to 2.4.2 in /frontend (#156)
* chore(deps-dev): bump prettier from 3.1.1 to 3.2.4 in /frontend (#155)
* chore(deps): bump lucide-svelte from 0.309.0 to 0.314.0 in /frontend (#154)
* chore(deps): bump bits-ui from 0.14.0 to 0.15.1 in /frontend (#153)
* feat: minor changes
* feat: deps change
* feat: deps change
* feat: onboarding MVP done
* refactor: moved schemas into forms/helpers.ts and command menu improvements
* refactor: switched to new font, changes made to all except status page
* refactor: minor change, didn't get committed
* fix: minor fix
* feat: fixed git merge conflicts issue
* feat: fixed status page font too

---------

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Spoked <5782630+dreulavelle@users.noreply.github.com>
Co-authored-by: Spoked
Co-authored-by: Dreu LaVelle
Co-authored-by: Gaisberg
---
 frontend/src/app.html | 60 ++++---
 .../src/lib/components/header-item.svelte | 4 +-
 frontend/src/lib/components/header.svelte | 6 +-
 .../src/lib/components/service-status.svelte | 4 +-
 .../lib/components/status-media-card.svelte | 18 +--
 frontend/src/lib/forms/content-form.svelte | 114 ++++++--------
 frontend/src/lib/forms/general-form.svelte | 52 ++++---
 frontend/src/lib/forms/helpers.ts | 70 +++++++--
 .../src/lib/forms/media-server-form.svelte | 26 ++--
 frontend/src/lib/forms/scrapers-form.svelte | 66 ++++----
 frontend/src/lib/schemas/setting.ts | 49 ------
 frontend/src/routes/+error.svelte | 9 +-
 frontend/src/routes/+layout.svelte | 146 ++++++++++--------
 frontend/src/routes/+page.svelte | 22 +--
 frontend/src/routes/onboarding/+page.svelte | 6 +-
 .../src/routes/onboarding/1/+page.server.ts | 15 +-
 frontend/src/routes/onboarding/1/+page.svelte | 6 +-
 .../src/routes/onboarding/2/+page.server.ts | 8 +-
 frontend/src/routes/onboarding/2/+page.svelte | 6 +-
 .../src/routes/onboarding/3/+page.server.ts | 8 +-
 frontend/src/routes/onboarding/3/+page.svelte | 6 +-
 .../src/routes/onboarding/4/+page.server.ts | 8 +-
 frontend/src/routes/onboarding/4/+page.svelte | 6 +-
 frontend/src/routes/settings/+layout.svelte | 4 +-
 .../src/routes/settings/about/+page.svelte | 22 +--
 .../routes/settings/content/+page.server.ts | 2 +-
 .../src/routes/settings/content/+page.svelte | 4 +-
 .../routes/settings/general/+page.server.ts | 14 +-
 .../src/routes/settings/general/+page.svelte | 12 +-
 .../settings/mediaserver/+page.server.ts | 2 +-
 .../routes/settings/mediaserver/+page.svelte | 4 +-
 .../routes/settings/scrapers/+page.server.ts | 12 +-
 .../src/routes/settings/scrapers/+page.svelte | 4 +-
 frontend/src/routes/status/+page.svelte | 16 +-
 frontend/static/fonts/Afacad-Bold.woff | Bin 41960 -> 0 bytes
 frontend/static/fonts/Afacad-Bold.woff2 | Bin 30072 -> 0 bytes
 frontend/static/fonts/Afacad-Medium.woff | Bin 42740 -> 0 bytes
 frontend/static/fonts/Afacad-Medium.woff2 | Bin 30760 -> 0 bytes
 frontend/static/fonts/Afacad-Regular.woff | Bin 41300 -> 0 bytes
 frontend/static/fonts/Afacad-Regular.woff2 | Bin 29560 -> 0 bytes
 frontend/static/fonts/Afacad-SemiBold.woff | Bin 42864 -> 0 bytes
 frontend/static/fonts/Afacad-SemiBold.woff2 | Bin 30940 -> 0 bytes
 .../static/fonts/subset-Montserrat-Bold.woff | Bin 0 -> 19348 bytes
 .../static/fonts/subset-Montserrat-Bold.woff2 | Bin 0 -> 15036 bytes
 .../static/fonts/subset-Montserrat-Light.woff | Bin 0 -> 19140 bytes
 .../fonts/subset-Montserrat-Light.woff2 | Bin 0 -> 14740 bytes
 .../fonts/subset-Montserrat-Medium.woff | Bin 0 -> 19220 bytes
 .../fonts/subset-Montserrat-Medium.woff2 | Bin 0 -> 14920 bytes
 .../fonts/subset-Montserrat-Regular.woff | Bin 0 -> 19156 bytes
 .../fonts/subset-Montserrat-Regular.woff2 | Bin 0 -> 14832 bytes
 .../fonts/subset-Montserrat-SemiBold.woff | Bin 0 -> 19168 bytes
 .../fonts/subset-Montserrat-SemiBold.woff2 | Bin 0 -> 14896 bytes
 frontend/tailwind.config.js | 2 +-
 53 files changed, 411 insertions(+), 402 deletions(-)
 delete mode 100644 frontend/src/lib/schemas/setting.ts
 delete mode 100644 frontend/static/fonts/Afacad-Bold.woff
 delete mode 100644 frontend/static/fonts/Afacad-Bold.woff2
 delete mode 100644 frontend/static/fonts/Afacad-Medium.woff
 delete mode 100644 frontend/static/fonts/Afacad-Medium.woff2
 delete mode 100644 frontend/static/fonts/Afacad-Regular.woff
 delete mode 100644 frontend/static/fonts/Afacad-Regular.woff2
 delete mode 100644 frontend/static/fonts/Afacad-SemiBold.woff
 delete mode 100644 frontend/static/fonts/Afacad-SemiBold.woff2
 create mode 100644 frontend/static/fonts/subset-Montserrat-Bold.woff
 create mode 100644 frontend/static/fonts/subset-Montserrat-Bold.woff2
 create mode 100644 frontend/static/fonts/subset-Montserrat-Light.woff
 create mode 100644 frontend/static/fonts/subset-Montserrat-Light.woff2
 create mode 100644 frontend/static/fonts/subset-Montserrat-Medium.woff
 create mode 100644 frontend/static/fonts/subset-Montserrat-Medium.woff2
 create mode 100644 frontend/static/fonts/subset-Montserrat-Regular.woff
 create mode 100644 frontend/static/fonts/subset-Montserrat-Regular.woff2
 create mode 100644 frontend/static/fonts/subset-Montserrat-SemiBold.woff
 create mode 100644 frontend/static/fonts/subset-Montserrat-SemiBold.woff2

diff --git a/frontend/src/app.html b/frontend/src/app.html
index ab5964be..bd30208f 100644
--- a/frontend/src/app.html
+++ b/frontend/src/app.html
@@ -9,58 +9,80 @@
[hunk unrecoverable: the HTML markup was stripped during extraction; per the diffstat above, this hunk presumably swaps the Afacad font preloads for the new subset-Montserrat files]
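Little of commit 23's code survives extraction here, but one line of its message is concrete enough to sketch: "added dev to formDebug so it's always true in development but false in production". Assuming the flag lives alongside the other helpers in frontend/src/lib/forms/helpers.ts (the location and export name are guesses from the message), it would reduce to SvelteKit's built-in environment flag:

// Hypothetical addition to $lib/forms/helpers.ts: the superforms debug
// overlay tracks the environment, so it is on under `vite dev` and off
// in production builds.
import { dev } from '$app/environment';

export const formDebug: boolean = dev;

Each form page can then read this one shared flag instead of hard-coding a debug boolean per form.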