From 2632350f4c21d1169aa7ddfaf92cf79eaa970935 Mon Sep 17 00:00:00 2001
From: Mustafa Soylu
Date: Sat, 7 Jun 2025 18:08:43 +0200
Subject: [PATCH 1/4] fix: history db not initializing

---
 app.py | 160 +++++++++++++++++++++++++++++++--------------------------
 1 file changed, 88 insertions(+), 72 deletions(-)

diff --git a/app.py b/app.py
index 5b3ccaf..3e17a4a 100755
--- a/app.py
+++ b/app.py
@@ -21,75 +21,77 @@ import socket
 from urllib.parse import urlparse
 
 # Import Celery configuration and manager
-from routes.utils.celery_tasks import celery_app
 from routes.utils.celery_manager import celery_manager
 from routes.utils.celery_config import REDIS_URL
+from routes.utils.history_manager import init_history_db
+
+
 # Configure application-wide logging
 def setup_logging():
     """Configure application-wide logging with rotation"""
     # Create logs directory if it doesn't exist
-    logs_dir = Path('logs')
+    logs_dir = Path("logs")
     logs_dir.mkdir(exist_ok=True)
-
+
     # Set up log file paths
-    main_log = logs_dir / 'spotizerr.log'
-
+    main_log = logs_dir / "spotizerr.log"
+
     # Configure root logger
     root_logger = logging.getLogger()
     root_logger.setLevel(logging.INFO)
-
+
     # Clear any existing handlers from the root logger
     if root_logger.hasHandlers():
         root_logger.handlers.clear()
-
+
     # Log formatting
     log_format = logging.Formatter(
-        '%(asctime)s [%(processName)s:%(threadName)s] [%(name)s] [%(levelname)s] - %(message)s',
-        datefmt='%Y-%m-%d %H:%M:%S'
+        "%(asctime)s [%(processName)s:%(threadName)s] [%(name)s] [%(levelname)s] - %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S",
     )
-
+
     # File handler with rotation (10 MB max, keep 5 backups)
     file_handler = logging.handlers.RotatingFileHandler(
-        main_log, maxBytes=10*1024*1024, backupCount=5, encoding='utf-8'
+        main_log, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
     )
     file_handler.setFormatter(log_format)
     file_handler.setLevel(logging.INFO)
-
+
     # Console handler for stderr
     console_handler = logging.StreamHandler(sys.stderr)
     console_handler.setFormatter(log_format)
     console_handler.setLevel(logging.INFO)
-
+
     # Add handlers to root logger
     root_logger.addHandler(file_handler)
     root_logger.addHandler(console_handler)
-
+
     # Set up specific loggers
-    for logger_name in ['werkzeug', 'celery', 'routes', 'flask', 'waitress']:
+    for logger_name in ["werkzeug", "celery", "routes", "flask", "waitress"]:
         module_logger = logging.getLogger(logger_name)
         module_logger.setLevel(logging.INFO)
         # Handlers are inherited from root logger
-
+
     # Enable propagation for all loggers
-    logging.getLogger('celery').propagate = True
-
+    logging.getLogger("celery").propagate = True
+
     # Notify successful setup
     root_logger.info("Logging system initialized")
-
+
     # Return the main file handler for permissions adjustment
     return file_handler
 
+
 def check_redis_connection():
     """Check if Redis is reachable and retry with exponential backoff if not"""
     max_retries = 5
     retry_count = 0
     retry_delay = 1  # start with 1 second
-
+
     # Extract host and port from REDIS_URL
     redis_host = "redis"  # default
-    redis_port = 6379 # default
-
+    redis_port = 6379  # default
+
     # Parse from REDIS_URL if possible
     if REDIS_URL:
         # parse hostname and port (handles optional auth)
@@ -101,10 +103,10 @@ def check_redis_connection():
             redis_port = parsed.port
         except Exception:
             pass
-
+
     # Log Redis connection details
     logging.info(f"Checking Redis connection to {redis_host}:{redis_port}")
-
+
     while retry_count < max_retries:
         try:
             # First try socket connection to check if Redis port is open
@@ -112,10 +114,12 @@
             sock.settimeout(2)
             result = sock.connect_ex((redis_host, redis_port))
             sock.close()
-
+
             if result != 0:
-                raise ConnectionError(f"Cannot connect to Redis at {redis_host}:{redis_port}")
-
+                raise ConnectionError(
+                    f"Cannot connect to Redis at {redis_host}:{redis_port}"
+                )
+
             # If socket connection successful, try Redis ping
             r = redis.Redis.from_url(REDIS_URL)
             r.ping()
@@ -124,82 +128,90 @@
         except Exception as e:
             retry_count += 1
             if retry_count >= max_retries:
-                logging.error(f"Failed to connect to Redis after {max_retries} attempts: {e}")
-                logging.error(f"Make sure Redis is running at {redis_host}:{redis_port}")
+                logging.error(
+                    f"Failed to connect to Redis after {max_retries} attempts: {e}"
+                )
+                logging.error(
+                    f"Make sure Redis is running at {redis_host}:{redis_port}"
+                )
                 return False
-
+
             logging.warning(f"Redis connection attempt {retry_count} failed: {e}")
             logging.info(f"Retrying in {retry_delay} seconds...")
             time.sleep(retry_delay)
             retry_delay *= 2  # exponential backoff
-
+
     return False
 
+
 def create_app():
-    app = Flask(__name__, template_folder='static/html')
-
+    app = Flask(__name__, template_folder="static/html")
+
     # Set up CORS
     CORS(app)
 
+    # Initialize databases
+    init_history_db()
+
     # Register blueprints
-    app.register_blueprint(config_bp, url_prefix='/api')
-    app.register_blueprint(search_bp, url_prefix='/api')
-    app.register_blueprint(credentials_bp, url_prefix='/api/credentials')
-    app.register_blueprint(album_bp, url_prefix='/api/album')
-    app.register_blueprint(track_bp, url_prefix='/api/track')
-    app.register_blueprint(playlist_bp, url_prefix='/api/playlist')
-    app.register_blueprint(artist_bp, url_prefix='/api/artist')
-    app.register_blueprint(prgs_bp, url_prefix='/api/prgs')
-    app.register_blueprint(history_bp, url_prefix='/api/history')
-
+    app.register_blueprint(config_bp, url_prefix="/api")
+    app.register_blueprint(search_bp, url_prefix="/api")
+    app.register_blueprint(credentials_bp, url_prefix="/api/credentials")
+    app.register_blueprint(album_bp, url_prefix="/api/album")
+    app.register_blueprint(track_bp, url_prefix="/api/track")
+    app.register_blueprint(playlist_bp, url_prefix="/api/playlist")
+    app.register_blueprint(artist_bp, url_prefix="/api/artist")
+    app.register_blueprint(prgs_bp, url_prefix="/api/prgs")
+    app.register_blueprint(history_bp, url_prefix="/api/history")
+
     # Serve frontend
-    @app.route('/')
+    @app.route("/")
    def serve_index():
-        return render_template('main.html')
+        return render_template("main.html")
 
     # Config page route
-    @app.route('/config')
+    @app.route("/config")
     def serve_config():
-        return render_template('config.html')
+        return render_template("config.html")
 
     # New route: Serve watch.html under /watchlist
-    @app.route('/watchlist')
+    @app.route("/watchlist")
     def serve_watchlist():
-        return render_template('watch.html')
+        return render_template("watch.html")
 
     # New route: Serve playlist.html under /playlist/<id>
-    @app.route('/playlist/<id>')
+    @app.route("/playlist/<id>")
     def serve_playlist(id):
         # The id parameter is captured, but you can use it as needed.
-        return render_template('playlist.html')
+        return render_template("playlist.html")
 
-    @app.route('/album/<id>')
+    @app.route("/album/<id>")
     def serve_album(id):
         # The id parameter is captured, but you can use it as needed.
-        return render_template('album.html')
+        return render_template("album.html")
 
-    @app.route('/track/<id>')
+    @app.route("/track/<id>")
     def serve_track(id):
         # The id parameter is captured, but you can use it as needed.
-        return render_template('track.html')
-
-    @app.route('/artist/<id>')
+        return render_template("track.html")
+
+    @app.route("/artist/<id>")
     def serve_artist(id):
         # The id parameter is captured, but you can use it as needed.
-        return render_template('artist.html')
+        return render_template("artist.html")
 
-    @app.route('/history')
+    @app.route("/history")
     def serve_history_page():
-        return render_template('history.html')
+        return render_template("history.html")
 
-    @app.route('/static/<path:path>')
+    @app.route("/static/<path:path>")
     def serve_static(path):
-        return send_from_directory('static', path)
+        return send_from_directory("static", path)
 
     # Serve favicon.ico from the same directory as index.html (templates)
-    @app.route('/favicon.ico')
+    @app.route("/favicon.ico")
     def serve_favicon():
-        return send_from_directory('static/html', 'favicon.ico')
+        return send_from_directory("static/html", "favicon.ico")
 
     # Add request logging middleware
     @app.before_request
@@ -209,7 +221,7 @@ def create_app():
 
     @app.after_request
     def log_response(response):
-        if hasattr(request, 'start_time'):
+        if hasattr(request, "start_time"):
             duration = round((time.time() - request.start_time) * 1000, 2)
             app.logger.debug(f"Response: {response.status} | Duration: {duration}ms")
         return response
@@ -222,41 +234,45 @@ def create_app():
 
     return app
 
+
 def start_celery_workers():
     """Start Celery workers with dynamic configuration"""
     logging.info("Starting Celery workers with dynamic configuration")
     celery_manager.start()
-
+
     # Register shutdown handler
     atexit.register(celery_manager.stop)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     # Configure application logging
     log_handler = setup_logging()
-
+
     # Set file permissions for log files if needed
     try:
         os.chmod(log_handler.baseFilename, 0o666)
     except:
         logging.warning("Could not set permissions on log file")
-
+
     # Log application startup
     logging.info("=== Spotizerr Application Starting ===")
-
+
     # Check Redis connection before starting workers
     if check_redis_connection():
         # Start Watch Manager
         from routes.utils.watch.manager import start_watch_manager
+
         start_watch_manager()
 
         # Start Celery workers
         start_celery_workers()
-
+
         # Create and start Flask app
         app = create_app()
         logging.info("Starting Flask server on port 7171")
         from waitress import serve
-        serve(app, host='0.0.0.0', port=7171)
+
+        serve(app, host="0.0.0.0", port=7171)
     else:
         logging.error("Cannot start application: Redis connection failed")
         sys.exit(1)
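Review note between the two patches: PATCH 1/4 works because init_history_db() is now called explicitly inside create_app(), rather than running only as a side effect of importing routes.utils.celery_manager (an import this same series removes). The history manager's internals are not shown in the series; the sketch below only illustrates the idempotent-initialization pattern that makes an unconditional call at startup safe — the path, table, and column names are hypothetical:

import os
import sqlite3

DB_PATH = "./data/history/history.db"  # illustrative location, not from the patch

def init_history_db():
    # Ensure the parent directory exists, then create the schema idempotently;
    # CREATE TABLE IF NOT EXISTS lets create_app() call this on every startup
    # without clobbering existing rows.
    os.makedirs(os.path.dirname(DB_PATH), exist_ok=True)
    with sqlite3.connect(DB_PATH) as conn:
        conn.execute(
            "CREATE TABLE IF NOT EXISTS download_history ("
            "task_id TEXT, status_final TEXT, timestamp_completed REAL)"
        )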
From 0007b4faa303c66939ce5d2a81f031d5e640b94d Mon Sep 17 00:00:00 2001
From: Mustafa Soylu
Date: Sat, 7 Jun 2025 18:09:29 +0200
Subject: [PATCH 2/4] fix: celery fails to start

---
 routes/utils/celery_manager.py |  286 +++++--
 routes/utils/celery_tasks.py   | 1327 ++++++++++++++++++++------------
 2 files changed, 1027 insertions(+), 586 deletions(-)

diff --git a/routes/utils/celery_manager.py b/routes/utils/celery_manager.py
index 096808e..f32d1aa 100644
--- a/routes/utils/celery_manager.py
+++ b/routes/utils/celery_manager.py
@@ -19,29 +19,26 @@ from .celery_tasks import (
     store_task_status,
     get_all_tasks as get_all_celery_tasks_info,
     cleanup_stale_errors,
-    delayed_delete_task_data
+    delayed_delete_task_data,
 )
 from .celery_config import get_config_params, MAX_CONCURRENT_DL
 
-# Import history manager
-from .history_manager import init_history_db
-# Import credentials manager for DB init
-from .credentials import init_credentials_db
 
 # Configure logging
 logger = logging.getLogger(__name__)
 
 # Configuration
-CONFIG_PATH = './data/config/main.json'
-CELERY_APP = 'routes.utils.celery_tasks.celery_app'
+CONFIG_PATH = "./data/config/main.json"
+CELERY_APP = "routes.utils.celery_tasks.celery_app"
 CELERY_PROCESS = None
 CONFIG_CHECK_INTERVAL = 30  # seconds
 
+
 class CeleryManager:
     """
     Manages Celery workers dynamically based on configuration changes.
     """
-
-    def __init__(self, app_name="download_tasks"):
+
+    def __init__(self, app_name="routes.utils.celery_tasks"):
         self.app_name = app_name
         self.download_worker_process = None
         self.utility_worker_process = None
@@ -52,22 +49,31 @@ class CeleryManager:
         self.stop_event = threading.Event()
         self.config_monitor_thread = None
         # self.concurrency now specifically refers to download worker concurrency
-        self.concurrency = get_config_params().get('maxConcurrentDownloads', MAX_CONCURRENT_DL)
-        logger.info(f"CeleryManager initialized. Download concurrency set to: {self.concurrency}")
-
-    def _get_worker_command(self, queues, concurrency, worker_name_suffix, log_level="INFO"):
+        self.concurrency = get_config_params().get(
+            "maxConcurrentDownloads", MAX_CONCURRENT_DL
+        )
+        logger.info(
+            f"CeleryManager initialized. Download concurrency set to: {self.concurrency}"
+        )
+
+    def _get_worker_command(
+        self, queues, concurrency, worker_name_suffix, log_level="INFO"
+    ):
         # Use a unique worker name to avoid conflicts.
         # %h is replaced by celery with the actual hostname.
         hostname = f"worker_{worker_name_suffix}@%h"
         command = [
             "celery",
-            "-A", self.app_name,
+            "-A",
+            self.app_name,
             "worker",
             "--loglevel=" + log_level,
-            "-Q", queues,
-            "-c", str(concurrency),
+            "-Q",
+            queues,
+            "-c",
+            str(concurrency),
             "--hostname=" + hostname,
-            "--pool=prefork"
+            "--pool=prefork",
         ]
         # Optionally add --without-gossip, --without-mingle, --without-heartbeat
         # if experiencing issues or to reduce network load, but defaults are usually fine.
@@ -78,155 +84,265 @@ class CeleryManager:
     def _process_output_reader(self, stream, log_prefix, error=False):
         logger.debug(f"Log reader thread started for {log_prefix}")
         try:
-            for line in iter(stream.readline, ''):
+            for line in iter(stream.readline, ""):
                 if line:
                     log_method = logger.error if error else logger.info
                     log_method(f"{log_prefix}: {line.strip()}")
-                elif self.stop_event.is_set(): # If empty line and stop is set, likely EOF
+                elif (
+                    self.stop_event.is_set()
+                ):  # If empty line and stop is set, likely EOF
                     break  # Loop may also exit if stream is closed by process termination
-        except ValueError: #ValueError: I/O operation on closed file
+        except ValueError:  # ValueError: I/O operation on closed file
             if not self.stop_event.is_set():
-                logger.error(f"Error reading Celery output from {log_prefix} (ValueError - stream closed unexpectedly?)", exc_info=False) # Don't print full trace for common close error
+                logger.error(
+                    f"Error reading Celery output from {log_prefix} (ValueError - stream closed unexpectedly?)",
+                    exc_info=False,
+                )  # Don't print full trace for common close error
             else:
-                logger.info(f"{log_prefix} stream reader gracefully stopped due to closed stream after stop signal.")
+                logger.info(
+                    f"{log_prefix} stream reader gracefully stopped due to closed stream after stop signal."
+                )
         except Exception as e:
-            logger.error(f"Unexpected error in log reader for {log_prefix}: {e}", exc_info=True)
+            logger.error(
+                f"Unexpected error in log reader for {log_prefix}: {e}", exc_info=True
+            )
         finally:
-            if hasattr(stream, 'close') and not stream.closed:
+            if hasattr(stream, "close") and not stream.closed:
                 stream.close()
             logger.info(f"{log_prefix} stream reader thread finished.")
 
     def start(self):
-        self.stop_event.clear() # Clear stop event before starting
+        self.stop_event.clear()  # Clear stop event before starting
 
         # Start Download Worker
         if self.download_worker_process and self.download_worker_process.poll() is None:
             logger.info("Celery Download Worker is already running.")
         else:
-            self.concurrency = get_config_params().get('maxConcurrentDownloads', self.concurrency)
+            self.concurrency = get_config_params().get(
+                "maxConcurrentDownloads", self.concurrency
+            )
             download_cmd = self._get_worker_command(
                 queues="downloads",
                 concurrency=self.concurrency,
-                worker_name_suffix="dlw" # Download Worker
+                worker_name_suffix="dlw",  # Download Worker
+            )
+            logger.info(
+                f"Starting Celery Download Worker with command: {' '.join(download_cmd)}"
             )
-            logger.info(f"Starting Celery Download Worker with command: {' '.join(download_cmd)}")
             self.download_worker_process = subprocess.Popen(
-                download_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True
+                download_cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+                bufsize=1,
+                universal_newlines=True,
+            )
+            self.download_log_thread_stdout = threading.Thread(
+                target=self._process_output_reader,
+                args=(self.download_worker_process.stdout, "Celery[DW-STDOUT]"),
+            )
+            self.download_log_thread_stderr = threading.Thread(
+                target=self._process_output_reader,
+                args=(self.download_worker_process.stderr, "Celery[DW-STDERR]", True),
             )
-            self.download_log_thread_stdout = threading.Thread(target=self._process_output_reader, args=(self.download_worker_process.stdout, "Celery[DW-STDOUT]"))
-            self.download_log_thread_stderr = threading.Thread(target=self._process_output_reader, args=(self.download_worker_process.stderr, "Celery[DW-STDERR]", True))
             self.download_log_thread_stdout.start()
             self.download_log_thread_stderr.start()
-            logger.info(f"Celery Download Worker (PID: {self.download_worker_process.pid}) started with concurrency {self.concurrency}.")
+            logger.info(
+                f"Celery Download Worker (PID: {self.download_worker_process.pid}) started with concurrency {self.concurrency}."
+            )
 
         # Start Utility Worker
         if self.utility_worker_process and self.utility_worker_process.poll() is None:
             logger.info("Celery Utility Worker is already running.")
         else:
             utility_cmd = self._get_worker_command(
-                queues="utility_tasks,default", # Listen to utility and default
+                queues="utility_tasks,default",  # Listen to utility and default
                 concurrency=3,
-                worker_name_suffix="utw" # Utility Worker
+                worker_name_suffix="utw",  # Utility Worker
+            )
+            logger.info(
+                f"Starting Celery Utility Worker with command: {' '.join(utility_cmd)}"
             )
-            logger.info(f"Starting Celery Utility Worker with command: {' '.join(utility_cmd)}")
             self.utility_worker_process = subprocess.Popen(
-                utility_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True
+                utility_cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+                bufsize=1,
+                universal_newlines=True,
+            )
+            self.utility_log_thread_stdout = threading.Thread(
+                target=self._process_output_reader,
+                args=(self.utility_worker_process.stdout, "Celery[UW-STDOUT]"),
+            )
+            self.utility_log_thread_stderr = threading.Thread(
+                target=self._process_output_reader,
+                args=(self.utility_worker_process.stderr, "Celery[UW-STDERR]", True),
             )
-            self.utility_log_thread_stdout = threading.Thread(target=self._process_output_reader, args=(self.utility_worker_process.stdout, "Celery[UW-STDOUT]"))
-            self.utility_log_thread_stderr = threading.Thread(target=self._process_output_reader, args=(self.utility_worker_process.stderr, "Celery[UW-STDERR]", True))
             self.utility_log_thread_stdout.start()
             self.utility_log_thread_stderr.start()
-            logger.info(f"Celery Utility Worker (PID: {self.utility_worker_process.pid}) started with concurrency 3.")
+            logger.info(
+                f"Celery Utility Worker (PID: {self.utility_worker_process.pid}) started with concurrency 3."
+            )
 
-        if self.config_monitor_thread is None or not self.config_monitor_thread.is_alive():
-            self.config_monitor_thread = threading.Thread(target=self._monitor_config_changes)
-            self.config_monitor_thread.daemon = True # Allow main program to exit even if this thread is running
+        if (
+            self.config_monitor_thread is None
+            or not self.config_monitor_thread.is_alive()
+        ):
+            self.config_monitor_thread = threading.Thread(
+                target=self._monitor_config_changes
+            )
+            self.config_monitor_thread.daemon = (
+                True  # Allow main program to exit even if this thread is running
+            )
             self.config_monitor_thread.start()
             logger.info("CeleryManager: Config monitor thread started.")
         else:
             logger.info("CeleryManager: Config monitor thread already running.")
 
     def _monitor_config_changes(self):
-        logger.info("CeleryManager: Config monitor thread active, monitoring configuration changes...")
+        logger.info(
+            "CeleryManager: Config monitor thread active, monitoring configuration changes..."
+        )
         while not self.stop_event.is_set():
            try:
                 time.sleep(10)  # Check every 10 seconds
-                if self.stop_event.is_set(): break
+                if self.stop_event.is_set():
+                    break
 
                 current_config = get_config_params()
-                new_max_concurrent_downloads = current_config.get('maxConcurrentDownloads', self.concurrency)
+                new_max_concurrent_downloads = current_config.get(
+                    "maxConcurrentDownloads", self.concurrency
+                )
 
                 if new_max_concurrent_downloads != self.concurrency:
-                    logger.info(f"CeleryManager: Detected change in maxConcurrentDownloads from {self.concurrency} to {new_max_concurrent_downloads}. Restarting download worker only.")
-
+                    logger.info(
+                        f"CeleryManager: Detected change in maxConcurrentDownloads from {self.concurrency} to {new_max_concurrent_downloads}. Restarting download worker only."
+                    )
+
                     # Stop only the download worker
-                    if self.download_worker_process and self.download_worker_process.poll() is None:
-                        logger.info(f"Stopping Celery Download Worker (PID: {self.download_worker_process.pid}) for config update...")
+                    if (
+                        self.download_worker_process
+                        and self.download_worker_process.poll() is None
+                    ):
+                        logger.info(
+                            f"Stopping Celery Download Worker (PID: {self.download_worker_process.pid}) for config update..."
+                        )
                         self.download_worker_process.terminate()
                         try:
                             self.download_worker_process.wait(timeout=10)
-                            logger.info(f"Celery Download Worker (PID: {self.download_worker_process.pid}) terminated.")
+                            logger.info(
+                                f"Celery Download Worker (PID: {self.download_worker_process.pid}) terminated."
+                            )
                         except subprocess.TimeoutExpired:
-                            logger.warning(f"Celery Download Worker (PID: {self.download_worker_process.pid}) did not terminate gracefully, killing.")
+                            logger.warning(
+                                f"Celery Download Worker (PID: {self.download_worker_process.pid}) did not terminate gracefully, killing."
+                            )
                            self.download_worker_process.kill()
                         self.download_worker_process = None
-
+
                     # Wait for log threads of download worker to finish
-                    if self.download_log_thread_stdout and self.download_log_thread_stdout.is_alive():
+                    if (
+                        self.download_log_thread_stdout
+                        and self.download_log_thread_stdout.is_alive()
+                    ):
                         self.download_log_thread_stdout.join(timeout=5)
-                    if self.download_log_thread_stderr and self.download_log_thread_stderr.is_alive():
+                    if (
+                        self.download_log_thread_stderr
+                        and self.download_log_thread_stderr.is_alive()
+                    ):
                         self.download_log_thread_stderr.join(timeout=5)
 
                     self.concurrency = new_max_concurrent_downloads
-
+
                     # Restart only the download worker
-                    download_cmd = self._get_worker_command("downloads", self.concurrency, "dlw")
-                    logger.info(f"Restarting Celery Download Worker with command: {' '.join(download_cmd)}")
-                    self.download_worker_process = subprocess.Popen(
-                        download_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True
+                    download_cmd = self._get_worker_command(
+                        "downloads", self.concurrency, "dlw"
+                    )
+                    logger.info(
+                        f"Restarting Celery Download Worker with command: {' '.join(download_cmd)}"
+                    )
+                    self.download_worker_process = subprocess.Popen(
+                        download_cmd,
+                        stdout=subprocess.PIPE,
+                        stderr=subprocess.PIPE,
+                        text=True,
+                        bufsize=1,
+                        universal_newlines=True,
+                    )
+                    self.download_log_thread_stdout = threading.Thread(
+                        target=self._process_output_reader,
+                        args=(self.download_worker_process.stdout, "Celery[DW-STDOUT]"),
+                    )
+                    self.download_log_thread_stderr = threading.Thread(
+                        target=self._process_output_reader,
+                        args=(
+                            self.download_worker_process.stderr,
+                            "Celery[DW-STDERR]",
+                            True,
+                        ),
                     )
-                    self.download_log_thread_stdout = threading.Thread(target=self._process_output_reader, args=(self.download_worker_process.stdout, "Celery[DW-STDOUT]"))
-                    self.download_log_thread_stderr = threading.Thread(target=self._process_output_reader, args=(self.download_worker_process.stderr, "Celery[DW-STDERR]", True))
                     self.download_log_thread_stdout.start()
                     self.download_log_thread_stderr.start()
-                    logger.info(f"Celery Download Worker (PID: {self.download_worker_process.pid}) restarted with new concurrency {self.concurrency}.")
+                    logger.info(
+                        f"Celery Download Worker (PID: {self.download_worker_process.pid}) restarted with new concurrency {self.concurrency}."
+                    )
             except Exception as e:
-                logger.error(f"CeleryManager: Error in config monitor thread: {e}", exc_info=True)
+                logger.error(
+                    f"CeleryManager: Error in config monitor thread: {e}", exc_info=True
+                )
                 # Avoid busy-looping on continuous errors
-                if not self.stop_event.is_set(): time.sleep(30)
+                if not self.stop_event.is_set():
+                    time.sleep(30)
 
         logger.info("CeleryManager: Config monitor thread stopped.")
-
+
     def _stop_worker_process(self, worker_process, worker_name):
         if worker_process and worker_process.poll() is None:
-            logger.info(f"Terminating Celery {worker_name} Worker (PID: {worker_process.pid})...")
+            logger.info(
+                f"Terminating Celery {worker_name} Worker (PID: {worker_process.pid})..."
+            )
             worker_process.terminate()
             try:
                 worker_process.wait(timeout=10)
-                logger.info(f"Celery {worker_name} Worker (PID: {worker_process.pid}) terminated.")
+                logger.info(
+                    f"Celery {worker_name} Worker (PID: {worker_process.pid}) terminated."
+                )
             except subprocess.TimeoutExpired:
-                logger.warning(f"Celery {worker_name} Worker (PID: {worker_process.pid}) did not terminate gracefully, killing.")
+                logger.warning(
+                    f"Celery {worker_name} Worker (PID: {worker_process.pid}) did not terminate gracefully, killing."
+                )
                 worker_process.kill()
-        return None # Set process to None after stopping
+        return None  # Set process to None after stopping
 
     def stop(self):
         logger.info("CeleryManager: Stopping Celery workers...")
-        self.stop_event.set() # Signal all threads to stop
+        self.stop_event.set()  # Signal all threads to stop
 
         # Stop download worker
-        self.download_worker_process = self._stop_worker_process(self.download_worker_process, "Download")
-
+        self.download_worker_process = self._stop_worker_process(
+            self.download_worker_process, "Download"
+        )
+
         # Stop utility worker
-        self.utility_worker_process = self._stop_worker_process(self.utility_worker_process, "Utility")
+        self.utility_worker_process = self._stop_worker_process(
+            self.utility_worker_process, "Utility"
+        )
 
         logger.info("Joining log threads...")
-        thread_timeout = 5 # seconds to wait for log threads
+        thread_timeout = 5  # seconds to wait for log threads
 
         # Join download worker log threads
-        if self.download_log_thread_stdout and self.download_log_thread_stdout.is_alive():
+        if (
+            self.download_log_thread_stdout
+            and self.download_log_thread_stdout.is_alive()
+        ):
             self.download_log_thread_stdout.join(timeout=thread_timeout)
-        if self.download_log_thread_stderr and self.download_log_thread_stderr.is_alive():
+        if (
+            self.download_log_thread_stderr
+            and self.download_log_thread_stderr.is_alive()
+        ):
             self.download_log_thread_stderr.join(timeout=thread_timeout)
 
         # Join utility worker log threads
@@ -238,24 +354,30 @@ class CeleryManager:
         if self.config_monitor_thread and self.config_monitor_thread.is_alive():
             logger.info("Joining config_monitor_thread...")
             self.config_monitor_thread.join(timeout=thread_timeout)
-
-        logger.info("CeleryManager: All workers and threads signaled to stop and joined.")
+
+        logger.info(
+            "CeleryManager: All workers and threads signaled to stop and joined."
+        )
 
     def restart(self):
         logger.info("CeleryManager: Restarting all Celery workers...")
         self.stop()
         # Short delay before restarting
         logger.info("Waiting a brief moment before restarting workers...")
-        time.sleep(2)
+        time.sleep(2)
         self.start()
         logger.info("CeleryManager: All Celery workers restarted.")
 
+
 # Global instance for managing Celery workers
 celery_manager = CeleryManager()
 
 # Example of how to use the manager (typically called from your main app script)
-if __name__ == '__main__':
-    logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] [%(threadName)s] [%(name)s] - %(message)s')
+if __name__ == "__main__":
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(asctime)s [%(levelname)s] [%(threadName)s] [%(name)s] - %(message)s",
+    )
     logger.info("Starting Celery Manager example...")
     celery_manager.start()
     try:
@@ -265,4 +387,4 @@ if __name__ == '__main__':
         logger.info("Keyboard interrupt received, stopping Celery Manager...")
     finally:
         celery_manager.stop()
-        logger.info("Celery Manager example finished.")
\ No newline at end of file
+        logger.info("Celery Manager example finished.")
diff --git a/routes/utils/celery_tasks.py b/routes/utils/celery_tasks.py
index fd45b6f..7db9635 100644
--- a/routes/utils/celery_tasks.py
+++ b/routes/utils/celery_tasks.py
@@ -5,36 +5,55 @@ import logging
 import traceback
 from datetime import datetime
 from celery import Celery, Task, states
-from celery.signals import task_prerun, task_postrun, task_failure, worker_ready, worker_init, setup_logging
+from celery.signals import (
+    task_prerun,
+    task_postrun,
+    task_failure,
+    worker_ready,
+    worker_init,
+    setup_logging,
+)
 from celery.exceptions import Retry
-import os # Added for path operations
-from pathlib import Path # Added for path operations
+import os  # Added for path operations
+from pathlib import Path  # Added for path operations
 
 # Configure logging
 logger = logging.getLogger(__name__)
 
 # Setup Redis and Celery
-from routes.utils.celery_config import REDIS_URL, REDIS_BACKEND, REDIS_PASSWORD, get_config_params
+from routes.utils.celery_config import (
+    REDIS_URL,
+    REDIS_BACKEND,
+    REDIS_PASSWORD,
+    get_config_params,
+)
+
 # Import for playlist watch DB update
-from routes.utils.watch.db import add_single_track_to_playlist_db, add_or_update_album_for_artist
+from routes.utils.watch.db import (
+    add_single_track_to_playlist_db,
+    add_or_update_album_for_artist,
+)
 
 # Import history manager function
 from .history_manager import add_entry_to_history
 
 # Initialize Celery app
-celery_app = Celery('download_tasks',
-                    broker=REDIS_URL,
-                    backend=REDIS_BACKEND)
+celery_app = Celery(
+    "routes.utils.celery_tasks", broker=REDIS_URL, backend=REDIS_BACKEND
+)
 
 # Load Celery config
-celery_app.config_from_object('routes.utils.celery_config')
+celery_app.config_from_object("routes.utils.celery_config")
 
 # Create Redis connection for storing task data that's not part of the Celery result backend
 import redis
+
 redis_client = redis.Redis.from_url(REDIS_URL)
 
+
 class ProgressState:
     """Enum-like class for progress states"""
+
     QUEUED = "queued"
     PROCESSING = "processing"
     COMPLETE = "complete"
@@ -42,7 +61,7 @@ class ProgressState:
     ERROR = "error"
     RETRYING = "retrying"
     CANCELLED = "cancelled"
     PROGRESS = "progress"
-
+
     # Additional states from deezspot library
     INITIALIZING = "initializing"
     DOWNLOADING = "downloading"
@@ -51,8 +70,11 @@ class ProgressState:
     REAL_TIME = "real_time"
     SKIPPED = "skipped"
     DONE = "done"
-    ERROR_RETRIED = "ERROR_RETRIED" # Status for an error task that has been retried
-    ERROR_AUTO_CLEANED = "ERROR_AUTO_CLEANED" # Status for an error task that was auto-cleaned
+    ERROR_RETRIED = "ERROR_RETRIED"  # Status for an error task that has been retried
+    ERROR_AUTO_CLEANED = (
+        "ERROR_AUTO_CLEANED"  # Status for an error task that was auto-cleaned
+    )
+
 
 # Reuse the application's logging configuration for Celery workers
 @setup_logging.connect
 def setup_celery_logging(**kwargs):
     # Using the root logger's handlers and level preserves our config
     return logging.getLogger()
 
+
 # The initialization of a worker will log the worker configuration
 @worker_init.connect
 def worker_init_handler(**kwargs):
     """Log when a worker initializes with its configuration details"""
     config = get_config_params()
-    logger.info(f"Celery worker initialized with concurrency {config.get('maxConcurrentDownloads', 3)}")
-    logger.info(f"Worker config: spotifyQuality={config.get('spotifyQuality')}, deezerQuality={config.get('deezerQuality')}")
+    logger.info(
+        f"Celery worker initialized with concurrency {config.get('maxConcurrentDownloads', 3)}"
+    )
+    logger.info(
+        f"Worker config: spotifyQuality={config.get('spotifyQuality')}, deezerQuality={config.get('deezerQuality')}"
+    )
     logger.debug("Worker Redis connection: " + REDIS_URL)
 
+
 def store_task_status(task_id, status_data):
     """
     Store task status information in Redis with a sequential ID
-
+
     Args:
         task_id: The task ID
         status_data: Dictionary containing status information
     """
     # Add timestamp if not present
-    if 'timestamp' not in status_data:
-        status_data['timestamp'] = time.time()
-
+    if "timestamp" not in status_data:
+        status_data["timestamp"] = time.time()
+
     try:
         # Get next ID for this task's status updates
         status_id = redis_client.incr(f"task:{task_id}:status:next_id")
-        status_data['id'] = status_id
-
+        status_data["id"] = status_id
+
         # Convert to JSON and store in Redis
         redis_client.rpush(f"task:{task_id}:status", json.dumps(status_data))
-
+
         # Set expiry for the list to avoid filling up Redis with old data
         redis_client.expire(f"task:{task_id}:status", 60 * 60 * 24 * 7)  # 7 days
-        redis_client.expire(f"task:{task_id}:status:next_id", 60 * 60 * 24 * 7) # 7 days
-
+        redis_client.expire(
+            f"task:{task_id}:status:next_id", 60 * 60 * 24 * 7
+        )  # 7 days
+
         # Publish an update event to a Redis channel for subscribers
         # This will be used by the SSE endpoint to push updates in real-time
         update_channel = f"task_updates:{task_id}"
-        redis_client.publish(update_channel, json.dumps({
-            "task_id": task_id,
-            "status_id": status_id
-        }))
+        redis_client.publish(
+            update_channel, json.dumps({"task_id": task_id, "status_id": status_id})
+        )
     except Exception as e:
         logger.error(f"Error storing task status: {e}")
         traceback.print_exc()
 
+
 def get_task_status(task_id):
     """Get all task status updates from Redis"""
     try:
         status_list = redis_client.lrange(f"task:{task_id}:status", 0, -1)
-        return [json.loads(s.decode('utf-8')) for s in status_list]
+        return [json.loads(s.decode("utf-8")) for s in status_list]
     except Exception as e:
         logger.error(f"Error getting task status: {e}")
         return []
 
+
 def get_last_task_status(task_id):
     """Get the most recent task status update from Redis"""
     try:
@@ -124,12 +155,13 @@
         status_list = redis_client.lrange(f"task:{task_id}:status", -1, -1)
         if not status_list:
             return None
-
-        return json.loads(status_list[0].decode('utf-8'))
+
+        return json.loads(status_list[0].decode("utf-8"))
     except Exception as e:
         logger.error(f"Error getting last task status: {e}")
         return None
 
+
 def store_task_info(task_id, task_info):
     """Store task information in Redis"""
     try:
@@ -138,17 +170,19 @@
     except Exception as e:
         logger.error(f"Error storing task info: {e}")
 
+
 def get_task_info(task_id):
     """Get task information from Redis"""
     try:
         task_info = redis_client.get(f"task:{task_id}:info")
         if task_info:
-            return json.loads(task_info.decode('utf-8'))
+            return json.loads(task_info.decode("utf-8"))
         return {}
     except Exception as e:
         logger.error(f"Error getting task info: {e}")
         return {}
 
+
 # --- History Logging Helper ---
 def _log_task_to_history(task_id, final_status_str, error_msg=None):
     """Helper function to gather task data and log it to the history database."""
     try:
@@ -157,96 +191,133 @@
         task_info = get_task_info(task_id)
         last_status_obj = get_last_task_status(task_id)
 
         if not task_info:
-            logger.warning(f"History: No task_info found for task_id {task_id}. Cannot log to history.")
+            logger.warning(
+                f"History: No task_info found for task_id {task_id}. Cannot log to history."
+            )
             return
 
         # Determine service_used and quality_profile
-        main_service_name = str(task_info.get('main', 'Unknown')).capitalize() # e.g. Spotify, Deezer from their respective .env values
-        fallback_service_name = str(task_info.get('fallback', '')).capitalize()
+        main_service_name = str(
+            task_info.get("main", "Unknown")
+        ).capitalize()  # e.g. Spotify, Deezer from their respective .env values
+        fallback_service_name = str(task_info.get("fallback", "")).capitalize()
 
         service_used_str = main_service_name
-        if task_info.get('fallback') and fallback_service_name: # Check if fallback was configured
-            # Try to infer actual service used if possible, otherwise show configured.
-            # This part is a placeholder for more accurate determination if deezspot gives explicit feedback.
-            # For now, we assume 'main' was used unless an error hints otherwise.
-            # A more robust solution would involve deezspot callback providing this.
-            service_used_str = f"{main_service_name} (Fallback: {fallback_service_name})"
+        if (
+            task_info.get("fallback") and fallback_service_name
+        ):  # Check if fallback was configured
+            # Try to infer actual service used if possible, otherwise show configured.
+            # This part is a placeholder for more accurate determination if deezspot gives explicit feedback.
+            # For now, we assume 'main' was used unless an error hints otherwise.
+            # A more robust solution would involve deezspot callback providing this.
+            service_used_str = (
+                f"{main_service_name} (Fallback: {fallback_service_name})"
+            )
             # If error message indicates fallback, we could try to parse it.
             # e.g. if error_msg and "fallback" in error_msg.lower(): service_used_str = f"{fallback_service_name} (Used Fallback)"
 
         # Determine quality profile (primarily from the 'quality' field)
         # 'quality' usually holds the primary service's quality (e.g., spotifyQuality, deezerQuality)
-        quality_profile_str = str(task_info.get('quality', 'N/A'))
+        quality_profile_str = str(task_info.get("quality", "N/A"))
 
         # Get convertTo and bitrate
-        convert_to_str = str(task_info.get('convertTo', '')) # Empty string if None or not present
-        bitrate_str = str(task_info.get('bitrate', '')) # Empty string if None or not present
+        convert_to_str = str(
+            task_info.get("convertTo", "")
+        )  # Empty string if None or not present
+        bitrate_str = str(
+            task_info.get("bitrate", "")
+        )  # Empty string if None or not present
 
         # Extract Spotify ID from item URL if possible
         spotify_id = None
-        item_url = task_info.get('url', '')
+        item_url = task_info.get("url", "")
         if item_url:
             try:
-                spotify_id = item_url.split('/')[-1]
+                spotify_id = item_url.split("/")[-1]
                 # Further validation if it looks like a Spotify ID (e.g., 22 chars, alphanumeric)
                 if not (spotify_id and len(spotify_id) == 22 and spotify_id.isalnum()):
-                    spotify_id = None # Reset if not a valid-looking ID
+                    spotify_id = None  # Reset if not a valid-looking ID
             except Exception:
-                spotify_id = None # Ignore errors in parsing
+                spotify_id = None  # Ignore errors in parsing
 
         history_entry = {
-            'task_id': task_id,
-            'download_type': task_info.get('download_type'),
-            'item_name': task_info.get('name'),
-            'item_artist': task_info.get('artist'),
-            'item_album': task_info.get('album', task_info.get('name') if task_info.get('download_type') == 'album' else None),
-            'item_url': item_url,
-            'spotify_id': spotify_id,
-            'status_final': final_status_str,
-            'error_message': error_msg if error_msg else (last_status_obj.get('error') if last_status_obj else None),
-            'timestamp_added': task_info.get('created_at', time.time()),
-            'timestamp_completed': last_status_obj.get('timestamp', time.time()) if last_status_obj else time.time(),
-            'original_request_json': json.dumps(task_info.get('original_request', {})),
-            'last_status_obj_json': json.dumps(last_status_obj if last_status_obj else {}),
-            'service_used': service_used_str,
-            'quality_profile': quality_profile_str,
-            'convert_to': convert_to_str if convert_to_str else None, # Store None if empty string
-            'bitrate': bitrate_str if bitrate_str else None # Store None if empty string
+            "task_id": task_id,
+            "download_type": task_info.get("download_type"),
+            "item_name": task_info.get("name"),
+            "item_artist": task_info.get("artist"),
+            "item_album": task_info.get(
+                "album",
+                task_info.get("name")
+                if task_info.get("download_type") == "album"
+                else None,
+            ),
+            "item_url": item_url,
+            "spotify_id": spotify_id,
+            "status_final": final_status_str,
+            "error_message": error_msg
+            if error_msg
+            else (last_status_obj.get("error") if last_status_obj else None),
+            "timestamp_added": task_info.get("created_at", time.time()),
+            "timestamp_completed": last_status_obj.get("timestamp", time.time())
+            if last_status_obj
+            else time.time(),
+            "original_request_json": json.dumps(task_info.get("original_request", {})),
+            "last_status_obj_json": json.dumps(
+                last_status_obj if last_status_obj else {}
+            ),
+            "service_used": service_used_str,
+            "quality_profile": quality_profile_str,
+            "convert_to": convert_to_str
+            if convert_to_str
+            else None,  # Store None if empty string
+            "bitrate": bitrate_str
+            if bitrate_str
+            else None,  # Store None if empty string
         }
         add_entry_to_history(history_entry)
 
     except Exception as e:
-        logger.error(f"History: Error preparing or logging history for task {task_id}: {e}", exc_info=True)
+        logger.error(
+            f"History: Error preparing or logging history for task {task_id}: {e}",
+            exc_info=True,
+        )
+
 # --- End History Logging Helper ---
 
+
 def cancel_task(task_id):
     """Cancel a task by its ID"""
     try:
         # Mark the task as cancelled in Redis
-        store_task_status(task_id, {
-            "status": ProgressState.CANCELLED,
-            "error": "Task cancelled by user",
-            "timestamp": time.time()
-        })
-
+        store_task_status(
+            task_id,
+            {
+                "status": ProgressState.CANCELLED,
+                "error": "Task cancelled by user",
+                "timestamp": time.time(),
+            },
+        )
+
         # Try to revoke the Celery task if it hasn't started yet
-        celery_app.control.revoke(task_id, terminate=True, signal='SIGTERM')
-
+        celery_app.control.revoke(task_id, terminate=True, signal="SIGTERM")
+
         # Log cancellation to history
-        _log_task_to_history(task_id, 'CANCELLED', "Task cancelled by user")
+        _log_task_to_history(task_id, "CANCELLED", "Task cancelled by user")
 
         # Schedule deletion of task data after 30 seconds
         delayed_delete_task_data.apply_async(
-            args=[task_id, "Task cancelled by user and auto-cleaned."],
-            countdown=30
+            args=[task_id, "Task cancelled by user and auto-cleaned."], countdown=30
+        )
+        logger.info(
+            f"Task {task_id} cancelled by user. Data scheduled for deletion in 30s."
         )
-        logger.info(f"Task {task_id} cancelled by user. Data scheduled for deletion in 30s.")
         return {"status": "cancelled", "task_id": task_id}
     except Exception as e:
         logger.error(f"Error cancelling task {task_id}: {e}")
         return {"status": "error", "message": str(e)}
 
+
 def retry_task(task_id):
     """Retry a failed task"""
     try:
@@ -254,59 +325,61 @@ def retry_task(task_id):
         task_info = get_task_info(task_id)
         if not task_info:
             return {"status": "error", "error": f"Task {task_id} not found"}
-
+
         # Check if task has error status
         last_status = get_last_task_status(task_id)
         if not last_status or last_status.get("status") != ProgressState.ERROR:
             return {"status": "error", "error": "Task is not in a failed state"}
-
+
         # Get current retry count
         retry_count = last_status.get("retry_count", 0)
-
+
         # Get retry configuration from config
         config_params = get_config_params()
-        max_retries = config_params.get('maxRetries', 3)
-        initial_retry_delay = config_params.get('retryDelaySeconds', 5)
-        retry_delay_increase = config_params.get('retry_delay_increase', 5)
-
+        max_retries = config_params.get("maxRetries", 3)
+        initial_retry_delay = config_params.get("retryDelaySeconds", 5)
+        retry_delay_increase = config_params.get("retry_delay_increase", 5)
+
         # Check if we've exceeded max retries
         if retry_count >= max_retries:
             return {
                 "status": "error",
-                "error": f"Maximum retry attempts ({max_retries}) exceeded"
+                "error": f"Maximum retry attempts ({max_retries}) exceeded",
             }
-
+
         # Calculate retry delay
         retry_delay = initial_retry_delay + (retry_count * retry_delay_increase)
-
+
         # Create a new task_id for the retry
         new_task_id = f"{task_id}_retry{retry_count + 1}"
-
+
         # Update task info for the retry
         task_info["retry_count"] = retry_count + 1
         task_info["retry_of"] = task_id
-
+
         # Use retry_url if available, otherwise use the original url
         if "retry_url" in task_info and task_info["retry_url"]:
             task_info["url"] = task_info["retry_url"]
-
+
         # Get service configuration
         service = config_params.get("service")
         fallback_enabled = config_params.get("fallback", False)
-
+
         # Update service settings
-        if service == 'spotify':
+        if service == "spotify":
             if fallback_enabled:
                 task_info["main"] = config_params.get("deezer", "")
                 task_info["fallback"] = config_params.get("spotify", "")
                 task_info["quality"] = config_params.get("deezerQuality", "MP3_128")
-                task_info["fall_quality"] = config_params.get("spotifyQuality", "NORMAL")
+                task_info["fall_quality"] = config_params.get(
+                    "spotifyQuality", "NORMAL"
+                )
             else:
                 task_info["main"] = config_params.get("spotify", "")
                 task_info["fallback"] = None
                 task_info["quality"] = config_params.get("spotifyQuality", "NORMAL")
                 task_info["fall_quality"] = None
-        elif service == 'deezer':
+        elif service == "deezer":
             task_info["main"] = config_params.get("deezer", "")
             task_info["fallback"] = None
             task_info["quality"] = config_params.get("deezerQuality", "MP3_128")
@@ -316,274 +389,313 @@ def retry_task(task_id):
             task_info["fallback"] = None
             task_info["quality"] = config_params.get("spotifyQuality", "NORMAL")
             task_info["fall_quality"] = None
-
+
         # Ensure service comes from config for the retry
         task_info["service"] = service
-
+
         # Update other config-derived parameters
-        task_info["real_time"] = task_info.get("real_time", config_params.get("realTime", False))
-        task_info["custom_dir_format"] = task_info.get("custom_dir_format", config_params.get("customDirFormat", "%ar_album%/%album%"))
-        task_info["custom_track_format"] = task_info.get("custom_track_format", config_params.get("customTrackFormat", "%tracknum%. %music%"))
-        task_info["pad_tracks"] = task_info.get("pad_tracks", config_params.get("tracknum_padding", True))
-
+        task_info["real_time"] = task_info.get(
+            "real_time", config_params.get("realTime", False)
+        )
+        task_info["custom_dir_format"] = task_info.get(
+            "custom_dir_format",
+            config_params.get("customDirFormat", "%ar_album%/%album%"),
+        )
+        task_info["custom_track_format"] = task_info.get(
+            "custom_track_format",
+            config_params.get("customTrackFormat", "%tracknum%. %music%"),
+        )
+        task_info["pad_tracks"] = task_info.get(
+            "pad_tracks", config_params.get("tracknum_padding", True)
+        )
+
         # Store the updated task info
         store_task_info(new_task_id, task_info)
-
+
         # Create a queued status
-        store_task_status(new_task_id, {
-            "status": ProgressState.QUEUED,
-            "type": task_info.get("type", "unknown"),
-            "name": task_info.get("name", "Unknown"),
-            "artist": task_info.get("artist", ""),
-            "retry_count": retry_count + 1,
-            "max_retries": max_retries,
-            "retry_delay": retry_delay,
-            "timestamp": time.time()
-        })
-
+        store_task_status(
+            new_task_id,
+            {
+                "status": ProgressState.QUEUED,
+                "type": task_info.get("type", "unknown"),
+                "name": task_info.get("name", "Unknown"),
+                "artist": task_info.get("artist", ""),
+                "retry_count": retry_count + 1,
+                "max_retries": max_retries,
+                "retry_delay": retry_delay,
+                "timestamp": time.time(),
+            },
+        )
+
         # Launch the appropriate task based on download_type
         download_type = task_info.get("download_type", "unknown")
         new_celery_task_obj = None
-
-        logger.info(f"Retrying task {task_id} as {new_task_id} (retry {retry_count + 1}/{max_retries})")
-
+
+        logger.info(
+            f"Retrying task {task_id} as {new_task_id} (retry {retry_count + 1}/{max_retries})"
+        )
+
         if download_type == "track":
             new_celery_task_obj = download_track.apply_async(
-                kwargs=task_info,
-                task_id=new_task_id,
-                queue='downloads'
+                kwargs=task_info, task_id=new_task_id, queue="downloads"
             )
         elif download_type == "album":
             new_celery_task_obj = download_album.apply_async(
-                kwargs=task_info,
-                task_id=new_task_id,
-                queue='downloads'
+                kwargs=task_info, task_id=new_task_id, queue="downloads"
             )
         elif download_type == "playlist":
             new_celery_task_obj = download_playlist.apply_async(
-                kwargs=task_info,
-                task_id=new_task_id,
-                queue='downloads'
+                kwargs=task_info, task_id=new_task_id, queue="downloads"
            )
         else:
             logger.error(f"Unknown download type for retry: {download_type}")
-            store_task_status(new_task_id, {
-                "status": ProgressState.ERROR,
-                "error": f"Cannot retry: Unknown download type '{download_type}' for original task {task_id}",
-                "timestamp": time.time()
-            })
+            store_task_status(
+                new_task_id,
+                {
+                    "status": ProgressState.ERROR,
+                    "error": f"Cannot retry: Unknown download type '{download_type}' for original task {task_id}",
+                    "timestamp": time.time(),
+                },
+            )
             return {
                 "status": "error",
-                "error": f"Unknown download type: {download_type}"
+                "error": f"Unknown download type: {download_type}",
             }
 
         # If retry was successfully submitted, update the original task's status
         if new_celery_task_obj:
-            store_task_status(task_id, {
-                "status": "ERROR_RETRIED",
-                "error": f"Task superseded by retry: {new_task_id}",
-                "retried_as_task_id": new_task_id,
-                "timestamp": time.time()
-            })
-            logger.info(f"Original task {task_id} status updated to ERROR_RETRIED, superseded by {new_task_id}")
+            store_task_status(
+                task_id,
+                {
+                    "status": "ERROR_RETRIED",
+                    "error": f"Task superseded by retry: {new_task_id}",
+                    "retried_as_task_id": new_task_id,
+                    "timestamp": time.time(),
+                },
+            )
+            logger.info(
+                f"Original task {task_id} status updated to ERROR_RETRIED, superseded by {new_task_id}"
+            )
         else:
-            logger.error(f"Retry submission for task {task_id} (as {new_task_id}) did not return a Celery AsyncResult. Original task not marked as ERROR_RETRIED.")
+            logger.error(
+                f"Retry submission for task {task_id} (as {new_task_id}) did not return a Celery AsyncResult. Original task not marked as ERROR_RETRIED."
+            )
+
         return {
             "status": "requeued",
             "task_id": new_task_id,
             "retry_count": retry_count + 1,
             "max_retries": max_retries,
-            "retry_delay": retry_delay
+            "retry_delay": retry_delay,
         }
     except Exception as e:
         logger.error(f"Error retrying task {task_id}: {e}", exc_info=True)
         return {"status": "error", "error": str(e)}
 
+
 def get_all_tasks():
     """Get all active task IDs"""
     try:
         # Get all keys matching the task info pattern
         task_keys = redis_client.keys("task:*:info")
-
+
         # Extract task IDs from the keys
-        task_ids = [key.decode('utf-8').split(':')[1] for key in task_keys]
-
+        task_ids = [key.decode("utf-8").split(":")[1] for key in task_keys]
+
         # Get info for each task
         tasks = []
         for task_id in task_ids:
             task_info = get_task_info(task_id)
             last_status = get_last_task_status(task_id)
-
+
             if task_info and last_status:
-                tasks.append({
-                    "task_id": task_id,
-                    "type": task_info.get("type", "unknown"),
-                    "name": task_info.get("name", "Unknown"),
-                    "artist": task_info.get("artist", ""),
-                    "download_type": task_info.get("download_type", "unknown"),
-                    "status": last_status.get("status", "unknown"),
-                    "timestamp": last_status.get("timestamp", 0)
-                })
-
+                tasks.append(
+                    {
+                        "task_id": task_id,
+                        "type": task_info.get("type", "unknown"),
+                        "name": task_info.get("name", "Unknown"),
+                        "artist": task_info.get("artist", ""),
+                        "download_type": task_info.get("download_type", "unknown"),
+                        "status": last_status.get("status", "unknown"),
+                        "timestamp": last_status.get("timestamp", 0),
+                    }
+                )
+
         return tasks
     except Exception as e:
         logger.error(f"Error getting all tasks: {e}")
         return []
 
+
 class ProgressTrackingTask(Task):
     """Base task class that tracks progress through callbacks"""
-
+
     def progress_callback(self, progress_data):
         """
         Process progress data from deezspot library callbacks using the
         optimized approach based on known status types and flow patterns.
-
+
         Args:
             progress_data: Dictionary containing progress information from deezspot
         """
         task_id = self.request.id
-
+
         # Ensure ./logs/tasks directory exists
-        logs_tasks_dir = Path('./logs/tasks') # Using relative path as per your update
+        logs_tasks_dir = Path("./logs/tasks")  # Using relative path as per your update
         try:
             logs_tasks_dir.mkdir(parents=True, exist_ok=True)
         except Exception as e:
-            logger.error(f"Task {task_id}: Could not create log directory {logs_tasks_dir}: {e}")
+            logger.error(
+                f"Task {task_id}: Could not create log directory {logs_tasks_dir}: {e}"
+            )
 
         # Define log file path
         log_file_path = logs_tasks_dir / f"{task_id}.log"
 
         # Log progress_data to the task-specific file
         try:
-            with open(log_file_path, 'a') as log_file:
+            with open(log_file_path, "a") as log_file:
                 # Add a timestamp to the log entry if not present, for consistency in the file
                 log_entry = progress_data.copy()
-                if 'timestamp' not in log_entry:
-                    log_entry['timestamp'] = time.time()
-                print(json.dumps(log_entry), file=log_file) # Use print to file
+                if "timestamp" not in log_entry:
+                    log_entry["timestamp"] = time.time()
+                print(json.dumps(log_entry), file=log_file)  # Use print to file
         except Exception as e:
-            logger.error(f"Task {task_id}: Could not write to task log file {log_file_path}: {e}")
-
+            logger.error(
+                f"Task {task_id}: Could not write to task log file {log_file_path}: {e}"
+            )
+
         # Add timestamp if not present
-        if 'timestamp' not in progress_data:
-            progress_data['timestamp'] = time.time()
-
+        if "timestamp" not in progress_data:
+            progress_data["timestamp"] = time.time()
+
         # Get status type
         status = progress_data.get("status", "unknown")
-
+
         # Create a work copy of the data to avoid modifying the original
         stored_data = progress_data.copy()
-
+
         # Get task info for context
         task_info = get_task_info(task_id)
-
+
         # Log raw progress data at debug level
         if logger.isEnabledFor(logging.DEBUG):
-            logger.debug(f"Task {task_id}: Raw progress data: {json.dumps(progress_data)}")
-
+            logger.debug(
+                f"Task {task_id}: Raw progress data: {json.dumps(progress_data)}"
+            )
+
         # Process based on status type using a more streamlined approach
         if status == "initializing":
             # --- INITIALIZING: Start of a download operation ---
             self._handle_initializing(task_id, stored_data, task_info)
-
+
         elif status == "downloading":
             # --- DOWNLOADING: Track download started ---
             self._handle_downloading(task_id, stored_data, task_info)
-
+
         elif status == "progress":
             # --- PROGRESS: Album/playlist track progress ---
             self._handle_progress(task_id, stored_data, task_info)
-
+
         elif status == "real_time" or status == "track_progress":
             # --- REAL_TIME/TRACK_PROGRESS: Track download real-time progress ---
             self._handle_real_time(task_id, stored_data)
-
+
         elif status == "skipped":
             # --- SKIPPED: Track was skipped ---
             self._handle_skipped(task_id, stored_data, task_info)
-
+
         elif status == "retrying":
             # --- RETRYING: Download failed and being retried ---
             self._handle_retrying(task_id, stored_data, task_info)
-
+
         elif status == "error":
             # --- ERROR: Error occurred during download ---
             self._handle_error(task_id, stored_data, task_info)
-
+
         elif status == "done":
             # --- DONE: Download operation completed ---
             self._handle_done(task_id, stored_data, task_info)
-
+
         else:
             # --- UNKNOWN: Unrecognized status ---
-            logger.info(f"Task {task_id} {status}: {stored_data.get('message', 'No details')}")
-
+            logger.info(
+                f"Task {task_id} {status}: {stored_data.get('message', 'No details')}"
+            )
+
         # Store the processed status update
         store_task_status(task_id, stored_data)
-
+
     def _handle_initializing(self, task_id, data, task_info):
         """Handle initializing status from deezspot"""
         # Extract relevant fields
-        content_type = data.get('type', '').upper()
-        name = data.get('name', '')
-        album_name = data.get('album', '')
-        artist = data.get('artist', '')
-        total_tracks = data.get('total_tracks', 0)
-
+        content_type = data.get("type", "").upper()
+        name = data.get("name", "")
+        album_name = data.get("album", "")
+        artist = data.get("artist", "")
+        total_tracks = data.get("total_tracks", 0)
+
         # Use album name as name if name is empty
         if not name and album_name:
-            data['name'] = album_name
-
+            data["name"] = album_name
+
         # Log initialization with appropriate detail level
         if album_name and artist:
-            logger.info(f"Task {task_id} initializing: {content_type} '{album_name}' by {artist} with {total_tracks} tracks")
+            logger.info(
+                f"Task {task_id} initializing: {content_type} '{album_name}' by {artist} with {total_tracks} tracks"
+            )
         elif album_name:
-            logger.info(f"Task {task_id} initializing: {content_type} '{album_name}' with {total_tracks} tracks")
+            logger.info(
+                f"Task {task_id} initializing: {content_type} '{album_name}' with {total_tracks} tracks"
+            )
         elif name:
-            logger.info(f"Task {task_id} initializing: {content_type} '{name}' with {total_tracks} tracks")
+            logger.info(
+                f"Task {task_id} initializing: {content_type} '{name}' with {total_tracks} tracks"
+            )
         else:
-            logger.info(f"Task {task_id} initializing: {content_type} with {total_tracks} tracks")
+            logger.info(
+                f"Task {task_id} initializing: {content_type} with {total_tracks} tracks"
+            )
+
         # Update task info with total tracks count
         if total_tracks > 0:
-            task_info['total_tracks'] = total_tracks
-            task_info['completed_tracks'] = task_info.get('completed_tracks', 0)
-            task_info['skipped_tracks'] = task_info.get('skipped_tracks', 0)
+            task_info["total_tracks"] = total_tracks
+            task_info["completed_tracks"] = task_info.get("completed_tracks", 0)
+            task_info["skipped_tracks"] = task_info.get("skipped_tracks", 0)
             store_task_info(task_id, task_info)
-
+
         # Update status in data
-        data['status'] = ProgressState.INITIALIZING
-
+        data["status"] = ProgressState.INITIALIZING
+
     def _handle_downloading(self, task_id, data, task_info):
         """Handle downloading status from deezspot"""
         # Extract relevant fields
-        track_name = data.get('song', 'Unknown')
-        artist = data.get('artist', '')
-        album = data.get('album', '')
-        download_type = data.get('type', '')
-
+        track_name = data.get("song", "Unknown")
+        artist = data.get("artist", "")
+        album = data.get("album", "")
+        download_type = data.get("type", "")
+
         # Get parent task context
-        parent_type = task_info.get('type', '').lower()
-
+        parent_type = task_info.get("type", "").lower()
+
         # If this is a track within an album/playlist, update progress
-        if parent_type in ['album', 'playlist'] and download_type == 'track':
-            total_tracks = task_info.get('total_tracks', 0)
-            current_track = task_info.get('current_track_num', 0) + 1
-
+        if parent_type in ["album", "playlist"] and download_type == "track":
+            total_tracks = task_info.get("total_tracks", 0)
+            current_track = task_info.get("current_track_num", 0) + 1
+
             # Update task info
-            task_info['current_track_num'] = current_track
-            task_info['current_track'] = track_name
-            task_info['current_artist'] = artist
+            task_info["current_track_num"] = current_track
+            task_info["current_track"] = track_name
+            task_info["current_artist"] = artist
             store_task_info(task_id, task_info)
-
+
             # Only calculate progress if we have total tracks
total tracks if total_tracks > 0: overall_progress = min(int((current_track / total_tracks) * 100), 100) - data['overall_progress'] = overall_progress - data['parsed_current_track'] = current_track - data['parsed_total_tracks'] = total_tracks - + data["overall_progress"] = overall_progress + data["parsed_current_track"] = current_track + data["parsed_total_tracks"] = total_tracks + # Create a progress update for the album/playlist progress_update = { "status": ProgressState.DOWNLOADING, @@ -592,24 +704,26 @@ class ProgressTrackingTask(Task): "current_track": f"{current_track}/{total_tracks}", "album": album, "artist": artist, - "timestamp": data['timestamp'], - "parent_task": True + "timestamp": data["timestamp"], + "parent_task": True, } - + # Store separate progress update store_task_status(task_id, progress_update) - + # Log with appropriate detail level if artist and album: - logger.info(f"Task {task_id} downloading: '{track_name}' by {artist} from {album}") + logger.info( + f"Task {task_id} downloading: '{track_name}' by {artist} from {album}" + ) elif artist: logger.info(f"Task {task_id} downloading: '{track_name}' by {artist}") else: logger.info(f"Task {task_id} downloading: '{track_name}'") - + # Update status - data['status'] = ProgressState.DOWNLOADING - + data["status"] = ProgressState.DOWNLOADING + def _handle_progress(self, task_id, data, task_info): """Handle progress status from deezspot""" # Extract track info @@ -617,206 +731,222 @@ class ProgressTrackingTask(Task): current_track_raw = data.get("current_track", "0") album = data.get("album", "") artist = data.get("artist", "") - + # Process artist if it's a list if isinstance(artist, list) and len(artist) > 0: data["artist_name"] = artist[0] elif isinstance(artist, str): data["artist_name"] = artist - + # Parse track numbers from "current/total" format if isinstance(current_track_raw, str) and "/" in current_track_raw: try: parts = current_track_raw.split("/") current_track = int(parts[0]) total_tracks = int(parts[1]) - + # Update with parsed values data["parsed_current_track"] = current_track data["parsed_total_tracks"] = total_tracks - + # Calculate percentage overall_progress = min(int((current_track / total_tracks) * 100), 100) data["overall_progress"] = overall_progress - + # Update task info - task_info['current_track_num'] = current_track - task_info['total_tracks'] = total_tracks - task_info['current_track'] = track_name + task_info["current_track_num"] = current_track + task_info["total_tracks"] = total_tracks + task_info["current_track"] = track_name store_task_info(task_id, task_info) - + # Log progress with appropriate detail artist_name = data.get("artist_name", artist) if album and artist_name: - logger.info(f"Task {task_id} progress: [{current_track}/{total_tracks}] {overall_progress}% - {track_name} by {artist_name} from {album}") + logger.info( + f"Task {task_id} progress: [{current_track}/{total_tracks}] {overall_progress}% - {track_name} by {artist_name} from {album}" + ) elif album: - logger.info(f"Task {task_id} progress: [{current_track}/{total_tracks}] {overall_progress}% - {track_name} from {album}") + logger.info( + f"Task {task_id} progress: [{current_track}/{total_tracks}] {overall_progress}% - {track_name} from {album}" + ) else: - logger.info(f"Task {task_id} progress: [{current_track}/{total_tracks}] {overall_progress}% - {track_name}") - + logger.info( + f"Task {task_id} progress: [{current_track}/{total_tracks}] {overall_progress}% - {track_name}" + ) + except (ValueError, IndexError) as e: 
logger.error(f"Error parsing track numbers '{current_track_raw}': {e}") - + # Ensure correct status - data['status'] = ProgressState.PROGRESS - + data["status"] = ProgressState.PROGRESS + def _handle_real_time(self, task_id, data): """Handle real-time progress status from deezspot""" # Extract track info - title = data.get('title', data.get('song', 'Unknown')) - artist = data.get('artist', 'Unknown') - + title = data.get("title", data.get("song", "Unknown")) + artist = data.get("artist", "Unknown") + # Handle percent formatting - percent = data.get('percent', data.get('percentage', 0)) + percent = data.get("percent", data.get("percentage", 0)) if isinstance(percent, float) and percent <= 1.0: percent = int(percent * 100) - data['percent'] = percent - + data["percent"] = percent + # Calculate download rate if bytes_received is available - if 'bytes_received' in data: - last_update = data.get('last_update_time', data['timestamp']) - bytes_received = data['bytes_received'] - last_bytes = data.get('last_bytes_received', 0) - time_diff = data['timestamp'] - last_update - + if "bytes_received" in data: + last_update = data.get("last_update_time", data["timestamp"]) + bytes_received = data["bytes_received"] + last_bytes = data.get("last_bytes_received", 0) + time_diff = data["timestamp"] - last_update + if time_diff > 0 and bytes_received > last_bytes: bytes_diff = bytes_received - last_bytes download_rate = bytes_diff / time_diff - data['download_rate'] = download_rate - data['last_update_time'] = data['timestamp'] - data['last_bytes_received'] = bytes_received - + data["download_rate"] = download_rate + data["last_update_time"] = data["timestamp"] + data["last_bytes_received"] = bytes_received + # Format download rate for display if download_rate < 1024: - data['download_rate_formatted'] = f"{download_rate:.2f} B/s" + data["download_rate_formatted"] = f"{download_rate:.2f} B/s" elif download_rate < 1024 * 1024: - data['download_rate_formatted'] = f"{download_rate/1024:.2f} KB/s" + data["download_rate_formatted"] = f"{download_rate / 1024:.2f} KB/s" else: - data['download_rate_formatted'] = f"{download_rate/(1024*1024):.2f} MB/s" - + data["download_rate_formatted"] = ( + f"{download_rate / (1024 * 1024):.2f} MB/s" + ) + # Log at debug level logger.debug(f"Task {task_id} track progress: {title} by {artist}: {percent}%") - + # Set appropriate status - data['status'] = ProgressState.REAL_TIME if data.get('status') == "real_time" else ProgressState.TRACK_PROGRESS - + data["status"] = ( + ProgressState.REAL_TIME + if data.get("status") == "real_time" + else ProgressState.TRACK_PROGRESS + ) + def _handle_skipped(self, task_id, data, task_info): """Handle skipped status from deezspot""" # Extract track info - title = data.get('song', 'Unknown') - artist = data.get('artist', 'Unknown') - reason = data.get('reason', 'Unknown reason') - + title = data.get("song", "Unknown") + artist = data.get("artist", "Unknown") + reason = data.get("reason", "Unknown reason") + # Log skip logger.info(f"Task {task_id} skipped: {artist} - {title}") logger.debug(f"Task {task_id} skip reason: {reason}") - + # Update task info - skipped_tracks = task_info.get('skipped_tracks', 0) + 1 - task_info['skipped_tracks'] = skipped_tracks + skipped_tracks = task_info.get("skipped_tracks", 0) + 1 + task_info["skipped_tracks"] = skipped_tracks store_task_info(task_id, task_info) - + # Check if part of album/playlist - parent_type = task_info.get('type', '').lower() - if parent_type in ['album', 'playlist']: - total_tracks = 
task_info.get('total_tracks', 0) - processed_tracks = task_info.get('completed_tracks', 0) + skipped_tracks - + parent_type = task_info.get("type", "").lower() + if parent_type in ["album", "playlist"]: + total_tracks = task_info.get("total_tracks", 0) + processed_tracks = task_info.get("completed_tracks", 0) + skipped_tracks + if total_tracks > 0: - overall_progress = min(int((processed_tracks / total_tracks) * 100), 100) - + overall_progress = min( + int((processed_tracks / total_tracks) * 100), 100 + ) + # Create parent progress update progress_update = { "status": ProgressState.PROGRESS, "type": parent_type, "track": title, "current_track": f"{processed_tracks}/{total_tracks}", - "album": data.get('album', ''), + "album": data.get("album", ""), "artist": artist, - "timestamp": data['timestamp'], + "timestamp": data["timestamp"], "parsed_current_track": processed_tracks, "parsed_total_tracks": total_tracks, "overall_progress": overall_progress, "track_skipped": True, "skip_reason": reason, - "parent_task": True + "parent_task": True, } - + # Store progress update store_task_status(task_id, progress_update) - + # Set status - data['status'] = ProgressState.SKIPPED - + data["status"] = ProgressState.SKIPPED + def _handle_retrying(self, task_id, data, task_info): """Handle retrying status from deezspot""" # Extract retry info - song = data.get('song', 'Unknown') - artist = data.get('artist', 'Unknown') - retry_count = data.get('retry_count', 0) - seconds_left = data.get('seconds_left', 0) - error = data.get('error', 'Unknown error') - + song = data.get("song", "Unknown") + artist = data.get("artist", "Unknown") + retry_count = data.get("retry_count", 0) + seconds_left = data.get("seconds_left", 0) + error = data.get("error", "Unknown error") + # Log retry - logger.warning(f"Task {task_id} retrying: {artist} - {song} (Attempt {retry_count}, waiting {seconds_left}s)") + logger.warning( + f"Task {task_id} retrying: {artist} - {song} (Attempt {retry_count}, waiting {seconds_left}s)" + ) logger.debug(f"Task {task_id} retry reason: {error}") - + # Update task info - retry_count_total = task_info.get('retry_count', 0) + 1 - task_info['retry_count'] = retry_count_total + retry_count_total = task_info.get("retry_count", 0) + 1 + task_info["retry_count"] = retry_count_total store_task_info(task_id, task_info) - + # Set status - data['status'] = ProgressState.RETRYING - + data["status"] = ProgressState.RETRYING + def _handle_error(self, task_id, data, task_info): """Handle error status from deezspot""" # Extract error info - message = data.get('message', 'Unknown error') - + message = data.get("message", "Unknown error") + # Log error logger.error(f"Task {task_id} error: {message}") - + # Update task info - error_count = task_info.get('error_count', 0) + 1 - task_info['error_count'] = error_count + error_count = task_info.get("error_count", 0) + 1 + task_info["error_count"] = error_count store_task_info(task_id, task_info) - + # Set status and error message - data['status'] = ProgressState.ERROR - data['error'] = message - + data["status"] = ProgressState.ERROR + data["error"] = message + def _handle_done(self, task_id, data, task_info): """Handle done status from deezspot""" # Extract data - content_type = data.get('type', '').lower() - album = data.get('album', '') - artist = data.get('artist', '') - song = data.get('song', '') - + content_type = data.get("type", "").lower() + album = data.get("album", "") + artist = data.get("artist", "") + song = data.get("song", "") + # Handle based on content 
type - if content_type == 'track': + if content_type == "track": # For track completions if artist and song: logger.info(f"Task {task_id} completed: Track '{song}' by {artist}") else: logger.info(f"Task {task_id} completed: Track '{song}'") - + # Update status to track_complete - data['status'] = ProgressState.TRACK_COMPLETE - + data["status"] = ProgressState.TRACK_COMPLETE + # Update task info - completed_tracks = task_info.get('completed_tracks', 0) + 1 - task_info['completed_tracks'] = completed_tracks + completed_tracks = task_info.get("completed_tracks", 0) + 1 + task_info["completed_tracks"] = completed_tracks store_task_info(task_id, task_info) - + # If part of album/playlist, update progress - parent_type = task_info.get('type', '').lower() - if parent_type in ['album', 'playlist']: - total_tracks = task_info.get('total_tracks', 0) + parent_type = task_info.get("type", "").lower() + if parent_type in ["album", "playlist"]: + total_tracks = task_info.get("total_tracks", 0) if total_tracks > 0: completion_percent = int((completed_tracks / total_tracks) * 100) - + # Create progress update progress_update = { "status": ProgressState.PROGRESS, @@ -825,81 +955,115 @@ class ProgressTrackingTask(Task): "current_track": f"{completed_tracks}/{total_tracks}", "album": album, "artist": artist, - "timestamp": data['timestamp'], + "timestamp": data["timestamp"], "parsed_current_track": completed_tracks, "parsed_total_tracks": total_tracks, "overall_progress": completion_percent, "track_complete": True, - "parent_task": True + "parent_task": True, } - + # Store progress update store_task_status(task_id, progress_update) - - elif content_type in ['album', 'playlist']: + + elif content_type in ["album", "playlist"]: # Get completion counts - completed_tracks = task_info.get('completed_tracks', 0) - skipped_tracks = task_info.get('skipped_tracks', 0) - error_count = task_info.get('error_count', 0) - + completed_tracks = task_info.get("completed_tracks", 0) + skipped_tracks = task_info.get("skipped_tracks", 0) + error_count = task_info.get("error_count", 0) + # Log completion if album and artist: - logger.info(f"Task {task_id} completed: {content_type.upper()} '{album}' by {artist}") + logger.info( + f"Task {task_id} completed: {content_type.upper()} '{album}' by {artist}" + ) elif album: - logger.info(f"Task {task_id} completed: {content_type.upper()} '{album}'") + logger.info( + f"Task {task_id} completed: {content_type.upper()} '{album}'" + ) else: - name = data.get('name', '') + name = data.get("name", "") if name: - logger.info(f"Task {task_id} completed: {content_type.upper()} '{name}'") + logger.info( + f"Task {task_id} completed: {content_type.upper()} '{name}'" + ) else: logger.info(f"Task {task_id} completed: {content_type.upper()}") - + # Add summary data["status"] = ProgressState.COMPLETE - data["message"] = f"Download complete: {completed_tracks} tracks downloaded, {skipped_tracks} skipped" - + data["message"] = ( + f"Download complete: {completed_tracks} tracks downloaded, {skipped_tracks} skipped" + ) + # Log summary - logger.info(f"Task {task_id} summary: {completed_tracks} completed, {skipped_tracks} skipped, {error_count} errors") + logger.info( + f"Task {task_id} summary: {completed_tracks} completed, {skipped_tracks} skipped, {error_count} errors" + ) # Schedule deletion for completed multi-track downloads delayed_delete_task_data.apply_async( args=[task_id, "Task completed successfully and auto-cleaned."], - countdown=30 # Delay in seconds + countdown=30, # Delay in seconds ) - + 
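+            # Note: the 30s countdown above is best understood as a grace
+            # window. The terminal COMPLETE status stays readable in Redis
+            # until delayed_delete_task_data purges the task:{task_id}:* keys,
+            # so clients polling for progress can still observe the final
+            # state before cleanup. (Descriptive comment; the purpose is
+            # inferred from the cleanup code below, not stated by the author.)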
# If from playlist_watch and successful, add track to DB original_request = task_info.get("original_request", {}) - if original_request.get("source") == "playlist_watch" and task_info.get("download_type") == "track": # ensure it's a track for playlist + if ( + original_request.get("source") == "playlist_watch" + and task_info.get("download_type") == "track" + ): # ensure it's a track for playlist playlist_id = original_request.get("playlist_id") track_item_for_db = original_request.get("track_item_for_db") - - if playlist_id and track_item_for_db and track_item_for_db.get('track'): - logger.info(f"Task {task_id} was from playlist watch for playlist {playlist_id}. Adding track to DB.") + + if playlist_id and track_item_for_db and track_item_for_db.get("track"): + logger.info( + f"Task {task_id} was from playlist watch for playlist {playlist_id}. Adding track to DB." + ) try: add_single_track_to_playlist_db(playlist_id, track_item_for_db) except Exception as db_add_err: - logger.error(f"Failed to add track to DB for playlist {playlist_id} after successful download task {task_id}: {db_add_err}", exc_info=True) + logger.error( + f"Failed to add track to DB for playlist {playlist_id} after successful download task {task_id}: {db_add_err}", + exc_info=True, + ) else: - logger.warning(f"Task {task_id} was from playlist_watch but missing playlist_id or track_item_for_db for DB update. Original Request: {original_request}") + logger.warning( + f"Task {task_id} was from playlist_watch but missing playlist_id or track_item_for_db for DB update. Original Request: {original_request}" + ) # If from artist_watch and successful, update album in DB - if original_request.get("source") == "artist_watch" and task_info.get("download_type") == "album": + if ( + original_request.get("source") == "artist_watch" + and task_info.get("download_type") == "album" + ): artist_spotify_id = original_request.get("artist_spotify_id") album_data_for_db = original_request.get("album_data_for_db") - if artist_spotify_id and album_data_for_db and album_data_for_db.get("id"): + if ( + artist_spotify_id + and album_data_for_db + and album_data_for_db.get("id") + ): album_spotify_id = album_data_for_db.get("id") - logger.info(f"Task {task_id} was from artist watch for artist {artist_spotify_id}, album {album_spotify_id}. Updating album in DB as complete.") + logger.info( + f"Task {task_id} was from artist watch for artist {artist_spotify_id}, album {album_spotify_id}. Updating album in DB as complete." + ) try: add_or_update_album_for_artist( artist_spotify_id=artist_spotify_id, album_data=album_data_for_db, - task_id=task_id, - is_download_complete=True + task_id=task_id, + is_download_complete=True, ) except Exception as db_update_err: - logger.error(f"Failed to update album {album_spotify_id} in DB for artist {artist_spotify_id} after successful download task {task_id}: {db_update_err}", exc_info=True) + logger.error( + f"Failed to update album {album_spotify_id} in DB for artist {artist_spotify_id} after successful download task {task_id}: {db_update_err}", + exc_info=True, + ) else: - logger.warning(f"Task {task_id} was from artist_watch (album) but missing key data (artist_spotify_id or album_data_for_db) for DB update. Original Request: {original_request}") + logger.warning( + f"Task {task_id} was from artist_watch (album) but missing key data (artist_spotify_id or album_data_for_db) for DB update. 
Original Request: {original_request}" + ) else: # Generic done for other types @@ -907,104 +1071,166 @@ class ProgressTrackingTask(Task): data["status"] = ProgressState.COMPLETE data["message"] = "Download complete" + # Celery signal handlers @task_prerun.connect def task_prerun_handler(task_id=None, task=None, *args, **kwargs): """Signal handler when a task begins running""" try: task_info = get_task_info(task_id) - + # Update task status to processing - store_task_status(task_id, { - "status": ProgressState.PROCESSING, - "timestamp": time.time(), - "type": task_info.get("type", "unknown"), - "name": task_info.get("name", "Unknown"), - "artist": task_info.get("artist", "") - }) - - logger.info(f"Task {task_id} started processing: {task_info.get('name', 'Unknown')}") + store_task_status( + task_id, + { + "status": ProgressState.PROCESSING, + "timestamp": time.time(), + "type": task_info.get("type", "unknown"), + "name": task_info.get("name", "Unknown"), + "artist": task_info.get("artist", ""), + }, + ) + + logger.info( + f"Task {task_id} started processing: {task_info.get('name', 'Unknown')}" + ) except Exception as e: logger.error(f"Error in task_prerun_handler: {e}") + @task_postrun.connect -def task_postrun_handler(task_id=None, task=None, retval=None, state=None, *args, **kwargs): +def task_postrun_handler( + task_id=None, task=None, retval=None, state=None, *args, **kwargs +): """Signal handler when a task finishes""" try: # Define download task names download_task_names = ["download_track", "download_album", "download_playlist"] last_status_for_history = get_last_task_status(task_id) - if last_status_for_history and last_status_for_history.get("status") in [ProgressState.COMPLETE, ProgressState.ERROR, ProgressState.CANCELLED, "ERROR_RETRIED", "ERROR_AUTO_CLEANED"]: - if state == states.REVOKED and last_status_for_history.get("status") != ProgressState.CANCELLED: - logger.info(f"Task {task_id} was REVOKED (likely cancelled), logging to history.") - if task and task.name in download_task_names: # Check if it's a download task - _log_task_to_history(task_id, 'CANCELLED', "Task was revoked/cancelled.") + if last_status_for_history and last_status_for_history.get("status") in [ + ProgressState.COMPLETE, + ProgressState.ERROR, + ProgressState.CANCELLED, + "ERROR_RETRIED", + "ERROR_AUTO_CLEANED", + ]: + if ( + state == states.REVOKED + and last_status_for_history.get("status") != ProgressState.CANCELLED + ): + logger.info( + f"Task {task_id} was REVOKED (likely cancelled), logging to history." + ) + if ( + task and task.name in download_task_names + ): # Check if it's a download task + _log_task_to_history( + task_id, "CANCELLED", "Task was revoked/cancelled." + ) # return # Let status update proceed if necessary task_info = get_task_info(task_id) - current_redis_status = last_status_for_history.get("status") if last_status_for_history else None + current_redis_status = ( + last_status_for_history.get("status") if last_status_for_history else None + ) if state == states.SUCCESS: if current_redis_status != ProgressState.COMPLETE: - store_task_status(task_id, { - "status": ProgressState.COMPLETE, - "timestamp": time.time(), - "type": task_info.get("type", "unknown"), - "name": task_info.get("name", "Unknown"), - "artist": task_info.get("artist", ""), - "message": "Download completed successfully." 
- }) - logger.info(f"Task {task_id} completed successfully: {task_info.get('name', 'Unknown')}") - if task and task.name in download_task_names: # Check if it's a download task - _log_task_to_history(task_id, 'COMPLETED') + store_task_status( + task_id, + { + "status": ProgressState.COMPLETE, + "timestamp": time.time(), + "type": task_info.get("type", "unknown"), + "name": task_info.get("name", "Unknown"), + "artist": task_info.get("artist", ""), + "message": "Download completed successfully.", + }, + ) + logger.info( + f"Task {task_id} completed successfully: {task_info.get('name', 'Unknown')}" + ) + if ( + task and task.name in download_task_names + ): # Check if it's a download task + _log_task_to_history(task_id, "COMPLETED") - if task_info.get("download_type") == "track": # Applies to single track downloads and tracks from playlists/albums + if ( + task_info.get("download_type") == "track" + ): # Applies to single track downloads and tracks from playlists/albums delayed_delete_task_data.apply_async( args=[task_id, "Task completed successfully and auto-cleaned."], - countdown=30 + countdown=30, ) original_request = task_info.get("original_request", {}) # Handle successful track from playlist watch - if original_request.get("source") == "playlist_watch" and task_info.get("download_type") == "track": + if ( + original_request.get("source") == "playlist_watch" + and task_info.get("download_type") == "track" + ): playlist_id = original_request.get("playlist_id") track_item_for_db = original_request.get("track_item_for_db") - - if playlist_id and track_item_for_db and track_item_for_db.get('track'): - logger.info(f"Task {task_id} was from playlist watch for playlist {playlist_id}. Adding track to DB.") + + if playlist_id and track_item_for_db and track_item_for_db.get("track"): + logger.info( + f"Task {task_id} was from playlist watch for playlist {playlist_id}. Adding track to DB." + ) try: add_single_track_to_playlist_db(playlist_id, track_item_for_db) except Exception as db_add_err: - logger.error(f"Failed to add track to DB for playlist {playlist_id} after successful download task {task_id}: {db_add_err}", exc_info=True) + logger.error( + f"Failed to add track to DB for playlist {playlist_id} after successful download task {task_id}: {db_add_err}", + exc_info=True, + ) else: - logger.warning(f"Task {task_id} was from playlist_watch but missing playlist_id or track_item_for_db for DB update. Original Request: {original_request}") - + logger.warning( + f"Task {task_id} was from playlist_watch but missing playlist_id or track_item_for_db for DB update. Original Request: {original_request}" + ) + # Handle successful album from artist watch - if original_request.get("source") == "artist_watch" and task_info.get("download_type") == "album": + if ( + original_request.get("source") == "artist_watch" + and task_info.get("download_type") == "album" + ): artist_spotify_id = original_request.get("artist_spotify_id") album_data_for_db = original_request.get("album_data_for_db") - if artist_spotify_id and album_data_for_db and album_data_for_db.get("id"): + if ( + artist_spotify_id + and album_data_for_db + and album_data_for_db.get("id") + ): album_spotify_id = album_data_for_db.get("id") - logger.info(f"Task {task_id} was from artist watch for artist {artist_spotify_id}, album {album_spotify_id}. Updating album in DB as complete.") + logger.info( + f"Task {task_id} was from artist watch for artist {artist_spotify_id}, album {album_spotify_id}. Updating album in DB as complete." 
+ ) try: add_or_update_album_for_artist( artist_spotify_id=artist_spotify_id, album_data=album_data_for_db, - task_id=task_id, - is_download_complete=True + task_id=task_id, + is_download_complete=True, ) except Exception as db_update_err: - logger.error(f"Failed to update album {album_spotify_id} in DB for artist {artist_spotify_id} after successful download task {task_id}: {db_update_err}", exc_info=True) + logger.error( + f"Failed to update album {album_spotify_id} in DB for artist {artist_spotify_id} after successful download task {task_id}: {db_update_err}", + exc_info=True, + ) else: - logger.warning(f"Task {task_id} was from artist_watch (album) but missing key data (artist_spotify_id or album_data_for_db) for DB update. Original Request: {original_request}") + logger.warning( + f"Task {task_id} was from artist_watch (album) but missing key data (artist_spotify_id or album_data_for_db) for DB update. Original Request: {original_request}" + ) except Exception as e: logger.error(f"Error in task_postrun_handler: {e}", exc_info=True) + @task_failure.connect -def task_failure_handler(task_id=None, exception=None, traceback=None, sender=None, *args, **kwargs): +def task_failure_handler( + task_id=None, exception=None, traceback=None, sender=None, *args, **kwargs +): """Signal handler when a task fails""" try: # Skip if Retry exception @@ -1013,60 +1239,71 @@ def task_failure_handler(task_id=None, exception=None, traceback=None, sender=No # Define download task names download_task_names = ["download_track", "download_album", "download_playlist"] - + # Get task info and status task_info = get_task_info(task_id) last_status = get_last_task_status(task_id) - + # Get retry count retry_count = 0 if last_status: retry_count = last_status.get("retry_count", 0) - + # Get retry configuration config_params = get_config_params() - max_retries = config_params.get('maxRetries', 3) - + max_retries = config_params.get("maxRetries", 3) + # Check if we can retry can_retry = retry_count < max_retries - + # Update task status to error in Redis if not already an error if last_status and last_status.get("status") != ProgressState.ERROR: - store_task_status(task_id, { - "status": ProgressState.ERROR, - "timestamp": time.time(), - "type": task_info.get("type", "unknown"), - "name": task_info.get("name", "Unknown"), - "artist": task_info.get("artist", ""), - "error": str(exception), - "traceback": str(traceback), - "can_retry": can_retry, - "retry_count": retry_count, - "max_retries": max_retries - }) - + store_task_status( + task_id, + { + "status": ProgressState.ERROR, + "timestamp": time.time(), + "type": task_info.get("type", "unknown"), + "name": task_info.get("name", "Unknown"), + "artist": task_info.get("artist", ""), + "error": str(exception), + "traceback": str(traceback), + "can_retry": can_retry, + "retry_count": retry_count, + "max_retries": max_retries, + }, + ) + logger.error(f"Task {task_id} failed: {str(exception)}") - if sender and sender.name in download_task_names: # Check if it's a download task - _log_task_to_history(task_id, 'ERROR', str(exception)) + if ( + sender and sender.name in download_task_names + ): # Check if it's a download task + _log_task_to_history(task_id, "ERROR", str(exception)) if can_retry: logger.info(f"Task {task_id} can be retried ({retry_count}/{max_retries})") else: # If task cannot be retried, schedule its data for deletion - logger.info(f"Task {task_id} failed and cannot be retried. 
Data scheduled for deletion in 30s.") + logger.info( + f"Task {task_id} failed and cannot be retried. Data scheduled for deletion in 30s." + ) delayed_delete_task_data.apply_async( - args=[task_id, f"Task failed ({str(exception)}) and max retries reached. Auto-cleaned."], - countdown=30 + args=[ + task_id, + f"Task failed ({str(exception)}) and max retries reached. Auto-cleaned.", + ], + countdown=30, ) except Exception as e: logger.error(f"Error in task_failure_handler: {e}") + @worker_ready.connect def worker_ready_handler(**kwargs): """Signal handler when a worker starts up""" logger.info("Celery worker ready and listening for tasks") - + # Check Redis connection try: redis_client.ping() @@ -1074,26 +1311,31 @@ def worker_ready_handler(**kwargs): except Exception as e: logger.error(f"Redis connection failed: {e}") + # Define the download tasks -@celery_app.task(bind=True, base=ProgressTrackingTask, name="download_track", queue="downloads") +@celery_app.task( + bind=True, base=ProgressTrackingTask, name="download_track", queue="downloads" +) def download_track(self, **task_data): """ Task to download a track - + Args: **task_data: Dictionary containing all task parameters """ try: - logger.info(f"Processing track download task: {task_data.get('name', 'Unknown')}") + logger.info( + f"Processing track download task: {task_data.get('name', 'Unknown')}" + ) from routes.utils.track import download_track as download_track_func - + # Get config parameters config_params = get_config_params() service = config_params.get("service") fallback_enabled = config_params.get("fallback", False) - + # Determine service parameters - if service == 'spotify': + if service == "spotify": if fallback_enabled: main = config_params.get("deezer", "") fallback = config_params.get("spotify", "") @@ -1104,7 +1346,7 @@ def download_track(self, **task_data): fallback = None quality = config_params.get("spotifyQuality", "NORMAL") fall_quality = None - elif service == 'deezer': + elif service == "deezer": main = config_params.get("deezer", "") fallback = None quality = config_params.get("deezerQuality", "MP3_128") @@ -1114,17 +1356,25 @@ def download_track(self, **task_data): fallback = None quality = config_params.get("spotifyQuality", "NORMAL") fall_quality = None - + # Get remaining parameters url = task_data.get("url", "") real_time = task_data.get("real_time", config_params.get("realTime", False)) - custom_dir_format = task_data.get("custom_dir_format", config_params.get("customDirFormat", "%ar_album%/%album%")) - custom_track_format = task_data.get("custom_track_format", config_params.get("customTrackFormat", "%tracknum%. %music%")) - pad_tracks = task_data.get("pad_tracks", config_params.get("tracknum_padding", True)) + custom_dir_format = task_data.get( + "custom_dir_format", + config_params.get("customDirFormat", "%ar_album%/%album%"), + ) + custom_track_format = task_data.get( + "custom_track_format", + config_params.get("customTrackFormat", "%tracknum%. 
%music%"), + ) + pad_tracks = task_data.get( + "pad_tracks", config_params.get("tracknum_padding", True) + ) save_cover = task_data.get("save_cover", config_params.get("save_cover", True)) convert_to = task_data.get("convertTo", config_params.get("convertTo")) bitrate = task_data.get("bitrate", config_params.get("bitrate")) - + # Execute the download - service is now determined from URL download_track_func( url=url, @@ -1139,34 +1389,39 @@ def download_track(self, **task_data): save_cover=save_cover, progress_callback=self.progress_callback, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, ) - + return {"status": "success", "message": "Track download completed"} except Exception as e: logger.error(f"Error in download_track task: {e}") traceback.print_exc() raise -@celery_app.task(bind=True, base=ProgressTrackingTask, name="download_album", queue="downloads") + +@celery_app.task( + bind=True, base=ProgressTrackingTask, name="download_album", queue="downloads" +) def download_album(self, **task_data): """ Task to download an album - + Args: **task_data: Dictionary containing all task parameters """ try: - logger.info(f"Processing album download task: {task_data.get('name', 'Unknown')}") + logger.info( + f"Processing album download task: {task_data.get('name', 'Unknown')}" + ) from routes.utils.album import download_album as download_album_func - + # Get config parameters config_params = get_config_params() service = config_params.get("service") fallback_enabled = config_params.get("fallback", False) - + # Determine service parameters - if service == 'spotify': + if service == "spotify": if fallback_enabled: main = config_params.get("deezer", "") fallback = config_params.get("spotify", "") @@ -1177,7 +1432,7 @@ def download_album(self, **task_data): fallback = None quality = config_params.get("spotifyQuality", "NORMAL") fall_quality = None - elif service == 'deezer': + elif service == "deezer": main = config_params.get("deezer", "") fallback = None quality = config_params.get("deezerQuality", "MP3_128") @@ -1187,17 +1442,25 @@ def download_album(self, **task_data): fallback = None quality = config_params.get("spotifyQuality", "NORMAL") fall_quality = None - + # Get remaining parameters url = task_data.get("url", "") real_time = task_data.get("real_time", config_params.get("realTime", False)) - custom_dir_format = task_data.get("custom_dir_format", config_params.get("customDirFormat", "%ar_album%/%album%")) - custom_track_format = task_data.get("custom_track_format", config_params.get("customTrackFormat", "%tracknum%. %music%")) - pad_tracks = task_data.get("pad_tracks", config_params.get("tracknum_padding", True)) + custom_dir_format = task_data.get( + "custom_dir_format", + config_params.get("customDirFormat", "%ar_album%/%album%"), + ) + custom_track_format = task_data.get( + "custom_track_format", + config_params.get("customTrackFormat", "%tracknum%. 
%music%"), + ) + pad_tracks = task_data.get( + "pad_tracks", config_params.get("tracknum_padding", True) + ) save_cover = task_data.get("save_cover", config_params.get("save_cover", True)) convert_to = task_data.get("convertTo", config_params.get("convertTo")) bitrate = task_data.get("bitrate", config_params.get("bitrate")) - + # Execute the download - service is now determined from URL download_album_func( url=url, @@ -1212,34 +1475,39 @@ def download_album(self, **task_data): save_cover=save_cover, progress_callback=self.progress_callback, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, ) - + return {"status": "success", "message": "Album download completed"} except Exception as e: logger.error(f"Error in download_album task: {e}") traceback.print_exc() raise -@celery_app.task(bind=True, base=ProgressTrackingTask, name="download_playlist", queue="downloads") + +@celery_app.task( + bind=True, base=ProgressTrackingTask, name="download_playlist", queue="downloads" +) def download_playlist(self, **task_data): """ Task to download a playlist - + Args: **task_data: Dictionary containing all task parameters """ try: - logger.info(f"Processing playlist download task: {task_data.get('name', 'Unknown')}") + logger.info( + f"Processing playlist download task: {task_data.get('name', 'Unknown')}" + ) from routes.utils.playlist import download_playlist as download_playlist_func - + # Get config parameters config_params = get_config_params() service = config_params.get("service") fallback_enabled = config_params.get("fallback", False) - + # Determine service parameters - if service == 'spotify': + if service == "spotify": if fallback_enabled: main = config_params.get("deezer", "") fallback = config_params.get("spotify", "") @@ -1250,7 +1518,7 @@ def download_playlist(self, **task_data): fallback = None quality = config_params.get("spotifyQuality", "NORMAL") fall_quality = None - elif service == 'deezer': + elif service == "deezer": main = config_params.get("deezer", "") fallback = None quality = config_params.get("deezerQuality", "MP3_128") @@ -1260,22 +1528,34 @@ def download_playlist(self, **task_data): fallback = None quality = config_params.get("spotifyQuality", "NORMAL") fall_quality = None - + # Get remaining parameters url = task_data.get("url", "") real_time = task_data.get("real_time", config_params.get("realTime", False)) - custom_dir_format = task_data.get("custom_dir_format", config_params.get("customDirFormat", "%ar_album%/%album%")) - custom_track_format = task_data.get("custom_track_format", config_params.get("customTrackFormat", "%tracknum%. %music%")) - pad_tracks = task_data.get("pad_tracks", config_params.get("tracknum_padding", True)) + custom_dir_format = task_data.get( + "custom_dir_format", + config_params.get("customDirFormat", "%ar_album%/%album%"), + ) + custom_track_format = task_data.get( + "custom_track_format", + config_params.get("customTrackFormat", "%tracknum%. 
%music%"), + ) + pad_tracks = task_data.get( + "pad_tracks", config_params.get("tracknum_padding", True) + ) save_cover = task_data.get("save_cover", config_params.get("save_cover", True)) convert_to = task_data.get("convertTo", config_params.get("convertTo")) bitrate = task_data.get("bitrate", config_params.get("bitrate")) - + # Get retry parameters - initial_retry_delay = task_data.get("initial_retry_delay", config_params.get("retryDelaySeconds", 5)) - retry_delay_increase = task_data.get("retry_delay_increase", config_params.get("retry_delay_increase", 5)) + initial_retry_delay = task_data.get( + "initial_retry_delay", config_params.get("retryDelaySeconds", 5) + ) + retry_delay_increase = task_data.get( + "retry_delay_increase", config_params.get("retry_delay_increase", 5) + ) max_retries = task_data.get("max_retries", config_params.get("maxRetries", 3)) - + # Execute the download - service is now determined from URL download_playlist_func( url=url, @@ -1293,14 +1573,15 @@ def download_playlist(self, **task_data): max_retries=max_retries, progress_callback=self.progress_callback, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, ) - + return {"status": "success", "message": "Playlist download completed"} except Exception as e: logger.error(f"Error in download_playlist task: {e}") traceback.print_exc() - raise + raise + # Helper function to fully delete task data from Redis def delete_task_data_and_log(task_id, reason="Task data deleted"): @@ -1308,54 +1589,76 @@ def delete_task_data_and_log(task_id, reason="Task data deleted"): Marks a task as cancelled (if not already) and deletes all its data from Redis. """ try: - task_info = get_task_info(task_id) # Get info before deleting + task_info = get_task_info(task_id) # Get info before deleting last_status = get_last_task_status(task_id) current_status_val = last_status.get("status") if last_status else None # Determine the final status for Redis before deletion # The reason passed to this function indicates why it's being deleted. - final_redis_status = ProgressState.ERROR_AUTO_CLEANED # Default for most cleanup scenarios + final_redis_status = ( + ProgressState.ERROR_AUTO_CLEANED + ) # Default for most cleanup scenarios error_message_for_status = reason if reason == "Task completed successfully and auto-cleaned.": - final_redis_status = ProgressState.COMPLETE # It was already complete + final_redis_status = ProgressState.COMPLETE # It was already complete error_message_for_status = "Task completed and auto-cleaned." elif reason == "Task cancelled by user and auto-cleaned.": - final_redis_status = ProgressState.CANCELLED # It was already cancelled + final_redis_status = ProgressState.CANCELLED # It was already cancelled error_message_for_status = "Task cancelled and auto-cleaned." elif "Task failed" in reason and "max retries reached" in reason: - final_redis_status = ProgressState.ERROR # It was already an error (non-retryable) + final_redis_status = ( + ProgressState.ERROR + ) # It was already an error (non-retryable) error_message_for_status = reason elif reason == "Task interrupted by application restart and auto-cleaned.": - final_redis_status = ProgressState.ERROR # It was marked as ERROR (interrupted) + final_redis_status = ( + ProgressState.ERROR + ) # It was marked as ERROR (interrupted) error_message_for_status = reason # Add more specific conditions if needed based on other reasons `delayed_delete_task_data` might be called with. 
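+        # In summary, the branches above resolve the cleanup reason to a
+        # final Redis status roughly as follows:
+        #   "completed successfully ... auto-cleaned"  -> ProgressState.COMPLETE
+        #   "cancelled by user ... auto-cleaned"       -> ProgressState.CANCELLED
+        #   "Task failed ... max retries reached"      -> ProgressState.ERROR
+        #   "interrupted by application restart ..."   -> ProgressState.ERROR
+        #   any other reason                           -> ProgressState.ERROR_AUTO_CLEANED (default)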
# Update Redis status one last time if it's not already reflecting the final intended state for this cleanup. # This is mainly for cases where cleanup is initiated for tasks not yet in a fully terminal state by other handlers. - if current_status_val not in [ProgressState.COMPLETE, ProgressState.CANCELLED, ProgressState.ERROR_RETRIED, ProgressState.ERROR_AUTO_CLEANED, final_redis_status]: - store_task_status(task_id, { - "status": final_redis_status, - "error": error_message_for_status, # Use the reason as the error/message for this status - "timestamp": time.time() - }) + if current_status_val not in [ + ProgressState.COMPLETE, + ProgressState.CANCELLED, + ProgressState.ERROR_RETRIED, + ProgressState.ERROR_AUTO_CLEANED, + final_redis_status, + ]: + store_task_status( + task_id, + { + "status": final_redis_status, + "error": error_message_for_status, # Use the reason as the error/message for this status + "timestamp": time.time(), + }, + ) # History logging for COMPLETION, CANCELLATION, or definitive ERROR should have occurred when those states were first reached. # If this cleanup is for a task that *wasn't* in such a state (e.g. stale, still processing), log it now. if final_redis_status == ProgressState.ERROR_AUTO_CLEANED: - _log_task_to_history(task_id, 'ERROR', error_message_for_status) # Or a more specific status if desired + _log_task_to_history( + task_id, "ERROR", error_message_for_status + ) # Or a more specific status if desired # Delete Redis keys associated with the task redis_client.delete(f"task:{task_id}:info") redis_client.delete(f"task:{task_id}:status") redis_client.delete(f"task:{task_id}:status:next_id") - - logger.info(f"Data for task {task_id} ('{task_info.get('name', 'Unknown')}') deleted from Redis. Reason: {reason}") + + logger.info( + f"Data for task {task_id} ('{task_info.get('name', 'Unknown')}') deleted from Redis. Reason: {reason}" + ) return True except Exception as e: logger.error(f"Error deleting task data for {task_id}: {e}", exc_info=True) return False -@celery_app.task(name="cleanup_stale_errors", queue="utility_tasks") # Put on utility_tasks queue + +@celery_app.task( + name="cleanup_stale_errors", queue="utility_tasks" +) # Put on utility_tasks queue def cleanup_stale_errors(): """ Periodically checks for tasks in ERROR state for more than 1 minute and cleans them up. @@ -1372,7 +1675,7 @@ def cleanup_stale_errors(): stale_threshold = 60 # 1 minute for key_bytes in task_keys: - task_id = key_bytes.decode('utf-8').split(':')[1] + task_id = key_bytes.decode("utf-8").split(":")[1] last_status = get_last_task_status(task_id) if last_status and last_status.get("status") == ProgressState.ERROR: @@ -1380,24 +1683,40 @@ def cleanup_stale_errors(): if (current_time - error_timestamp) > stale_threshold: # Check again to ensure it wasn't retried just before cleanup current_last_status_before_delete = get_last_task_status(task_id) - if current_last_status_before_delete and current_last_status_before_delete.get("status") == ProgressState.ERROR_RETRIED: - logger.info(f"Task {task_id} was retried just before cleanup. Skipping delete.") + if ( + current_last_status_before_delete + and current_last_status_before_delete.get("status") + == ProgressState.ERROR_RETRIED + ): + logger.info( + f"Task {task_id} was retried just before cleanup. Skipping delete." + ) continue - - logger.info(f"Task {task_id} is in ERROR state for more than {stale_threshold}s. 
Cleaning up.") - if delete_task_data_and_log(task_id, reason=f"Auto-cleaned: Task was in ERROR state for over {stale_threshold} seconds without manual retry."): + + logger.info( + f"Task {task_id} is in ERROR state for more than {stale_threshold}s. Cleaning up." + ) + if delete_task_data_and_log( + task_id, + reason=f"Auto-cleaned: Task was in ERROR state for over {stale_threshold} seconds without manual retry.", + ): cleaned_count += 1 - - logger.info(f"cleanup_stale_errors task finished. Cleaned up {cleaned_count} stale errored tasks.") + + logger.info( + f"cleanup_stale_errors task finished. Cleaned up {cleaned_count} stale errored tasks." + ) return {"status": "complete", "cleaned_count": cleaned_count} except Exception as e: logger.error(f"Error during cleanup_stale_errors: {e}", exc_info=True) return {"status": "error", "error": str(e)} -@celery_app.task(name="delayed_delete_task_data", queue="utility_tasks") # Use utility_tasks queue + +@celery_app.task( + name="delayed_delete_task_data", queue="utility_tasks" +) # Use utility_tasks queue def delayed_delete_task_data(task_id, reason): """ Celery task to delete task data after a delay. """ logger.info(f"Executing delayed deletion for task {task_id}. Reason: {reason}") - delete_task_data_and_log(task_id, reason) \ No newline at end of file + delete_task_data_and_log(task_id, reason) From 3971dba9bffa90eaee442f107340b764f37a1743 Mon Sep 17 00:00:00 2001 From: Mustafa Soylu Date: Sat, 7 Jun 2025 18:46:50 +0200 Subject: [PATCH 3/4] add pre-commit config --- .pre-commit-config.yaml | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..b6d23af --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,41 @@ +repos: + # Various general + format-specific helpers + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: check-symlinks + - id: trailing-whitespace + - id: mixed-line-ending + args: [--fix=lf] + - id: check-yaml + exclude: 'mkdocs.yml' + - id: check-toml + - id: check-json + - id: check-ast + - id: debug-statements + - id: check-merge-conflict + - id: check-shebang-scripts-are-executable + - id: check-added-large-files + args: [--maxkb=10000] + - repo: https://github.com/python-jsonschema/check-jsonschema + rev: '0.33.0' + hooks: + - id: check-github-workflows + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.11.13 + hooks: + # Run the linter. + - id: ruff + types_or: [python, pyi, jupyter] + args: [--fix] + # Run the formatter. 
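+      # Typical usage with this config (standard pre-commit CLI, not
+      # specific to this repo): `pre-commit install` wires the git hook
+      # once, then `pre-commit run --all-files` lints and formats the
+      # whole tree on demand.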
+ - id: ruff-format + types_or: [python, pyi, jupyter] + - repo: https://github.com/pre-commit/mirrors-mypy + rev: 'v1.16.0' + hooks: + - id: mypy + args: [--no-strict-optional, --ignore-missing-imports] + # NOTE: you might need to add some deps here: + additional_dependencies: [waitress==3.0.2, types-waitress] From 62cbeeb51393f0be4abb85fe553789e1dc27dfcd Mon Sep 17 00:00:00 2001 From: Mustafa Soylu Date: Sat, 7 Jun 2025 18:47:18 +0200 Subject: [PATCH 4/4] complete overhaul with pre-commit hooks --- .env.example | 2 +- README.md | 6 +- app.py | 4 +- docker-compose.yaml | 2 +- routes/__init__.py | 18 +- routes/album.py | 155 +++-- routes/artist.py | 409 ++++++++---- routes/config.py | 139 ++-- routes/credentials.py | 189 ++++-- routes/history.py | 49 +- routes/playlist.py | 478 +++++++++----- routes/prgs.py | 169 +++-- routes/search.py | 57 +- routes/track.py | 183 +++--- routes/utils/album.py | 176 +++-- routes/utils/artist.py | 244 ++++--- routes/utils/celery_config.py | 147 +++-- routes/utils/celery_manager.py | 17 - routes/utils/celery_queue_manager.py | 418 ++++++------ routes/utils/celery_tasks.py | 14 +- routes/utils/credentials.py | 443 ++++++++----- routes/utils/get_info.py | 62 +- routes/utils/history_manager.py | 300 +++++---- routes/utils/playlist.py | 181 +++-- routes/utils/search.py | 62 +- routes/utils/track.py | 202 +++--- routes/utils/watch/db.py | 942 ++++++++++++++++++--------- routes/utils/watch/manager.py | 476 +++++++++----- src/js/album.ts | 26 +- src/js/artist.ts | 62 +- src/js/config.ts | 120 ++-- src/js/history.ts | 6 +- src/js/main.ts | 116 ++-- src/js/playlist.ts | 56 +- src/js/queue.ts | 576 ++++++++-------- src/js/track.ts | 24 +- src/js/watch.ts | 44 +- static/css/album/album.css | 8 +- static/css/artist/artist.css | 40 +- static/css/config/config.css | 46 +- static/css/history/history.css | 2 +- static/css/main/base.css | 36 +- static/css/main/icons.css | 2 +- static/css/main/main.css | 26 +- static/css/playlist/playlist.css | 22 +- static/css/queue/queue.css | 22 +- static/css/track/track.css | 4 +- static/css/watch/watch.css | 8 +- static/html/album.html | 8 +- static/html/artist.html | 8 +- static/html/config.html | 24 +- static/html/history.html | 6 +- static/html/main.html | 16 +- static/html/playlist.html | 8 +- static/html/track.html | 8 +- static/html/watch.html | 2 +- static/images/arrow-left.svg | 2 +- static/images/binoculars.svg | 24 +- static/images/cross.svg | 8 +- static/images/download.svg | 28 +- static/images/eye-crossed.svg | 10 +- static/images/eye.svg | 8 +- static/images/history.svg | 8 +- static/images/info.svg | 13 +- static/images/missing.svg | 5 +- static/images/music.svg | 2 +- static/images/plus-circle.svg | 6 +- static/images/queue-empty.svg | 10 +- static/images/refresh.svg | 8 +- static/images/search.svg | 2 +- tsconfig.json | 16 +- 71 files changed, 4200 insertions(+), 2820 deletions(-) diff --git a/.env.example b/.env.example index 20ecd23..303d5a9 100644 --- a/.env.example +++ b/.env.example @@ -16,4 +16,4 @@ PUID=1000 PGID=1000 # Optional: Sets the default file permissions for newly created files within the container. -UMASK=0022 \ No newline at end of file +UMASK=0022 \ No newline at end of file diff --git a/README.md b/README.md index dfc5cd2..b98e998 100755 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ Music downloader which combines the best of two worlds: Spotify's catalog and De *It will first try to download each track from Deezer and only if it fails, will grab it from Spotify **Only for spotify. 
For each track, it matches its length with the time it takes to download it -***Restrictions per account tier apply (see +***Restrictions per account tier apply (see ## Prerequisites @@ -120,7 +120,7 @@ Copy that value and paste it into the correspondant setting in Spotizerr - **Multiple Accounts**: - Manage credentials in settings - Switch active accounts per service - + - **Quality selector** - For spotify: OGG 96k, 160k and 320k (premium only) - For deezer: MP3 128k, MP3 320k (sometimes premium, it varies) and FLAC (premium only) @@ -132,7 +132,7 @@ Copy that value and paste it into the correspondant setting in Spotizerr - **Watching artits/playlists** - Start watching a spotify playlist and its tracks will be downloaded dynamically as it updates. - Start watching a spotify artist and their albums will be automatically downloaded, never miss a release! - + ## Troubleshooting **Common Issues**: diff --git a/app.py b/app.py index 3e17a4a..3a54956 100755 --- a/app.py +++ b/app.py @@ -251,8 +251,8 @@ if __name__ == "__main__": # Set file permissions for log files if needed try: os.chmod(log_handler.baseFilename, 0o666) - except: - logging.warning("Could not set permissions on log file") + except (OSError, FileNotFoundError) as e: + logging.warning(f"Could not set permissions on log file: {str(e)}") # Log application startup logging.info("=== Spotizerr Application Starting ===") diff --git a/docker-compose.yaml b/docker-compose.yaml index e554adf..659b42e 100755 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -34,7 +34,7 @@ services: volumes: - redis-data:/data command: redis-server --requirepass ${REDIS_PASSWORD} --appendonly yes - + volumes: redis-data: driver: local diff --git a/routes/__init__.py b/routes/__init__.py index d4013a1..9f959dc 100755 --- a/routes/__init__.py +++ b/routes/__init__.py @@ -3,22 +3,26 @@ import atexit # Configure basic logging for the application if not already configured # This is a good place for it if routes are a central part of your app structure. -logging.basicConfig(level=logging.INFO, - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" +) logger = logging.getLogger(__name__) try: from routes.utils.watch.manager import start_watch_manager, stop_watch_manager + # Start the playlist watch manager when the application/blueprint is initialized start_watch_manager() # Register the stop function to be called on application exit atexit.register(stop_watch_manager) logger.info("Playlist Watch Manager initialized and registered for shutdown.") except ImportError as e: - logger.error(f"Could not import or start Playlist Watch Manager: {e}. Playlist watching will be disabled.") + logger.error( + f"Could not import or start Playlist Watch Manager: {e}. Playlist watching will be disabled." 
+ ) except Exception as e: - logger.error(f"An unexpected error occurred during Playlist Watch Manager setup: {e}", exc_info=True) - -from .artist import artist_bp -from .prgs import prgs_bp + logger.error( + f"An unexpected error occurred during Playlist Watch Manager setup: {e}", + exc_info=True, + ) diff --git a/routes/album.py b/routes/album.py index 8a3df33..98f6d6d 100755 --- a/routes/album.py +++ b/routes/album.py @@ -1,6 +1,5 @@ from flask import Blueprint, Response, request import json -import os import traceback import uuid import time @@ -8,43 +7,56 @@ from routes.utils.celery_queue_manager import download_queue_manager from routes.utils.celery_tasks import store_task_info, store_task_status, ProgressState from routes.utils.get_info import get_spotify_info -album_bp = Blueprint('album', __name__) +album_bp = Blueprint("album", __name__) -@album_bp.route('/download/', methods=['GET']) + +@album_bp.route("/download/", methods=["GET"]) def handle_download(album_id): # Retrieve essential parameters from the request. # name = request.args.get('name') # artist = request.args.get('artist') - + # Construct the URL from album_id url = f"https://open.spotify.com/album/{album_id}" - + # Fetch metadata from Spotify try: album_info = get_spotify_info(album_id, "album") - if not album_info or not album_info.get('name') or not album_info.get('artists'): + if ( + not album_info + or not album_info.get("name") + or not album_info.get("artists") + ): return Response( - json.dumps({"error": f"Could not retrieve metadata for album ID: {album_id}"}), + json.dumps( + {"error": f"Could not retrieve metadata for album ID: {album_id}"} + ), status=404, - mimetype='application/json' + mimetype="application/json", ) - - name_from_spotify = album_info.get('name') - artist_from_spotify = album_info['artists'][0].get('name') if album_info['artists'] else "Unknown Artist" + + name_from_spotify = album_info.get("name") + artist_from_spotify = ( + album_info["artists"][0].get("name") + if album_info["artists"] + else "Unknown Artist" + ) except Exception as e: return Response( - json.dumps({"error": f"Failed to fetch metadata for album {album_id}: {str(e)}"}), + json.dumps( + {"error": f"Failed to fetch metadata for album {album_id}: {str(e)}"} + ), status=500, - mimetype='application/json' + mimetype="application/json", ) - + # Validate required parameters if not url: return Response( json.dumps({"error": "Missing required parameter: url"}), status=400, - mimetype='application/json' + mimetype="application/json", ) # Add the task to the queue with only essential parameters @@ -53,98 +65,97 @@ def handle_download(album_id): orig_params = request.args.to_dict() orig_params["original_url"] = request.url try: - task_id = download_queue_manager.add_task({ - "download_type": "album", - "url": url, - "name": name_from_spotify, - "artist": artist_from_spotify, - "orig_request": orig_params - }) + task_id = download_queue_manager.add_task( + { + "download_type": "album", + "url": url, + "name": name_from_spotify, + "artist": artist_from_spotify, + "orig_request": orig_params, + } + ) except Exception as e: # Generic error handling for other issues during task submission # Create an error task ID if add_task itself fails before returning an ID error_task_id = str(uuid.uuid4()) - - store_task_info(error_task_id, { - "download_type": "album", - "url": url, - "name": name_from_spotify, - "artist": artist_from_spotify, - "original_request": orig_params, - "created_at": time.time(), - "is_submission_error_task": True - }) - 
store_task_status(error_task_id, { - "status": ProgressState.ERROR, - "error": f"Failed to queue album download: {str(e)}", - "timestamp": time.time() - }) - return Response( - json.dumps({"error": f"Failed to queue album download: {str(e)}", "task_id": error_task_id}), - status=500, - mimetype='application/json' + + store_task_info( + error_task_id, + { + "download_type": "album", + "url": url, + "name": name_from_spotify, + "artist": artist_from_spotify, + "original_request": orig_params, + "created_at": time.time(), + "is_submission_error_task": True, + }, ) - + store_task_status( + error_task_id, + { + "status": ProgressState.ERROR, + "error": f"Failed to queue album download: {str(e)}", + "timestamp": time.time(), + }, + ) + return Response( + json.dumps( + { + "error": f"Failed to queue album download: {str(e)}", + "task_id": error_task_id, + } + ), + status=500, + mimetype="application/json", + ) + return Response( - json.dumps({"prg_file": task_id}), - status=202, - mimetype='application/json' + json.dumps({"prg_file": task_id}), status=202, mimetype="application/json" ) -@album_bp.route('/download/cancel', methods=['GET']) + +@album_bp.route("/download/cancel", methods=["GET"]) def cancel_download(): """ Cancel a running download process by its prg file name. """ - prg_file = request.args.get('prg_file') + prg_file = request.args.get("prg_file") if not prg_file: return Response( json.dumps({"error": "Missing process id (prg_file) parameter"}), status=400, - mimetype='application/json' + mimetype="application/json", ) # Use the queue manager's cancellation method. result = download_queue_manager.cancel_task(prg_file) status_code = 200 if result.get("status") == "cancelled" else 404 - return Response( - json.dumps(result), - status=status_code, - mimetype='application/json' - ) + return Response(json.dumps(result), status=status_code, mimetype="application/json") -@album_bp.route('/info', methods=['GET']) + +@album_bp.route("/info", methods=["GET"]) def get_album_info(): """ Retrieve Spotify album metadata given a Spotify album ID. Expects a query parameter 'id' that contains the Spotify album ID. """ - spotify_id = request.args.get('id') - + spotify_id = request.args.get("id") + if not spotify_id: return Response( json.dumps({"error": "Missing parameter: id"}), status=400, - mimetype='application/json' + mimetype="application/json", ) - + try: # Import and use the get_spotify_info function from the utility module. from routes.utils.get_info import get_spotify_info + album_info = get_spotify_info(spotify_id, "album") - return Response( - json.dumps(album_info), - status=200, - mimetype='application/json' - ) + return Response(json.dumps(album_info), status=200, mimetype="application/json") except Exception as e: - error_data = { - "error": str(e), - "traceback": traceback.format_exc() - } - return Response( - json.dumps(error_data), - status=500, - mimetype='application/json' - ) + error_data = {"error": str(e), "traceback": traceback.format_exc()} + return Response(json.dumps(error_data), status=500, mimetype="application/json") diff --git a/routes/artist.py b/routes/artist.py index dbd5e32..943132e 100644 --- a/routes/artist.py +++ b/routes/artist.py @@ -1,13 +1,10 @@ -#!/usr/bin/env python3 """ Artist endpoint blueprint. 
""" from flask import Blueprint, Response, request, jsonify import json -import os import traceback -from routes.utils.celery_queue_manager import download_queue_manager from routes.utils.artist import download_artist_albums # Imports for merged watch functionality @@ -20,22 +17,23 @@ from routes.utils.watch.db import ( get_watched_artists, add_specific_albums_to_artist_table, remove_specific_albums_from_artist_table, - is_album_in_artist_db + is_album_in_artist_db, ) from routes.utils.watch.manager import check_watched_artists, get_watch_config from routes.utils.get_info import get_spotify_info -artist_bp = Blueprint('artist', __name__, url_prefix='/api/artist') +artist_bp = Blueprint("artist", __name__, url_prefix="/api/artist") # Existing log_json can be used, or a logger instance. # Let's initialize a logger for consistency with merged code. logger = logging.getLogger(__name__) + def log_json(message_dict): print(json.dumps(message_dict)) -@artist_bp.route('/download/', methods=['GET']) +@artist_bp.route("/download/", methods=["GET"]) def handle_artist_download(artist_id): """ Enqueues album download tasks for the given artist. @@ -44,57 +42,59 @@ def handle_artist_download(artist_id): """ # Construct the artist URL from artist_id url = f"https://open.spotify.com/artist/{artist_id}" - + # Retrieve essential parameters from the request. - album_type = request.args.get('album_type', "album,single,compilation") - + album_type = request.args.get("album_type", "album,single,compilation") + # Validate required parameters - if not url: # This check is mostly for safety, as url is constructed + if not url: # This check is mostly for safety, as url is constructed return Response( json.dumps({"error": "Missing required parameter: url"}), status=400, - mimetype='application/json' + mimetype="application/json", ) try: # Import and call the updated download_artist_albums() function. # from routes.utils.artist import download_artist_albums # Already imported at top - + # Delegate to the download_artist_albums function which will handle album filtering successfully_queued_albums, duplicate_albums = download_artist_albums( - url=url, - album_type=album_type, - request_args=request.args.to_dict() + url=url, album_type=album_type, request_args=request.args.to_dict() ) - + # Return the list of album task IDs. response_data = { "status": "complete", "message": f"Artist discography processing initiated. {len(successfully_queued_albums)} albums queued.", - "queued_albums": successfully_queued_albums + "queued_albums": successfully_queued_albums, } if duplicate_albums: response_data["duplicate_albums"] = duplicate_albums - response_data["message"] += f" {len(duplicate_albums)} albums were already in progress or queued." + response_data["message"] += ( + f" {len(duplicate_albums)} albums were already in progress or queued." 
+ ) return Response( json.dumps(response_data), - status=202, # Still 202 Accepted as some operations may have succeeded - mimetype='application/json' + status=202, # Still 202 Accepted as some operations may have succeeded + mimetype="application/json", ) except Exception as e: return Response( - json.dumps({ - "status": "error", - "message": str(e), - "traceback": traceback.format_exc() - }), + json.dumps( + { + "status": "error", + "message": str(e), + "traceback": traceback.format_exc(), + } + ), status=500, - mimetype='application/json' + mimetype="application/json", ) -@artist_bp.route('/download/cancel', methods=['GET']) +@artist_bp.route("/download/cancel", methods=["GET"]) def cancel_artist_download(): """ Cancelling an artist download is not supported since the endpoint only enqueues album tasks. @@ -103,60 +103,61 @@ def cancel_artist_download(): return Response( json.dumps({"error": "Artist download cancellation is not supported."}), status=400, - mimetype='application/json' + mimetype="application/json", ) -@artist_bp.route('/info', methods=['GET']) +@artist_bp.route("/info", methods=["GET"]) def get_artist_info(): """ Retrieves Spotify artist metadata given a Spotify artist ID. Expects a query parameter 'id' with the Spotify artist ID. """ - spotify_id = request.args.get('id') - + spotify_id = request.args.get("id") + if not spotify_id: return Response( json.dumps({"error": "Missing parameter: id"}), status=400, - mimetype='application/json' + mimetype="application/json", ) - + try: artist_info = get_spotify_info(spotify_id, "artist_discography") # If artist_info is successfully fetched (it contains album items), # check if the artist is watched and augment album items with is_locally_known status - if artist_info and artist_info.get('items'): - watched_artist_details = get_watched_artist(spotify_id) # spotify_id is the artist ID - if watched_artist_details: # Artist is being watched - for album_item in artist_info['items']: - if album_item and album_item.get('id'): - album_id = album_item['id'] - album_item['is_locally_known'] = is_album_in_artist_db(spotify_id, album_id) - elif album_item: # Album object exists but no ID - album_item['is_locally_known'] = False + if artist_info and artist_info.get("items"): + watched_artist_details = get_watched_artist( + spotify_id + ) # spotify_id is the artist ID + if watched_artist_details: # Artist is being watched + for album_item in artist_info["items"]: + if album_item and album_item.get("id"): + album_id = album_item["id"] + album_item["is_locally_known"] = is_album_in_artist_db( + spotify_id, album_id + ) + elif album_item: # Album object exists but no ID + album_item["is_locally_known"] = False # If not watched, or no albums, is_locally_known will not be added. # Frontend should handle absence of this key as false. 
 return Response(
-            json.dumps(artist_info),
-            status=200,
-            mimetype='application/json'
+            json.dumps(artist_info), status=200, mimetype="application/json"
         )
     except Exception as e:
         return Response(
-            json.dumps({
-                "error": str(e),
-                "traceback": traceback.format_exc()
-            }),
+            json.dumps({"error": str(e), "traceback": traceback.format_exc()}),
             status=500,
-            mimetype='application/json'
+            mimetype="application/json",
         )
 
+
 # --- Merged Artist Watch Routes ---
-@artist_bp.route('/watch/<artist_spotify_id>', methods=['PUT'])
+
+@artist_bp.route("/watch/<artist_spotify_id>", methods=["PUT"])
 def add_artist_to_watchlist(artist_spotify_id):
     """Adds an artist to the watchlist."""
     watch_config = get_watch_config()
@@ -166,52 +167,90 @@ def add_artist_to_watchlist(artist_spotify_id):
     logger.info(f"Attempting to add artist {artist_spotify_id} to watchlist.")
     try:
         if get_watched_artist(artist_spotify_id):
-            return jsonify({"message": f"Artist {artist_spotify_id} is already being watched."}), 200
+            return jsonify(
+                {"message": f"Artist {artist_spotify_id} is already being watched."}
+            ), 200
 
         # This call returns an album list-like structure based on logs
-        artist_album_list_data = get_spotify_info(artist_spotify_id, "artist_discography")
-        
+        artist_album_list_data = get_spotify_info(
+            artist_spotify_id, "artist_discography"
+        )
+
         # Check if we got any data and if it has items
-        if not artist_album_list_data or not isinstance(artist_album_list_data.get('items'), list):
-            logger.error(f"Could not fetch album list details for artist {artist_spotify_id} from Spotify using get_spotify_info('artist_discography'). Data: {artist_album_list_data}")
-            return jsonify({"error": f"Could not fetch sufficient details for artist {artist_spotify_id} to initiate watch."}), 404
+        if not artist_album_list_data or not isinstance(
+            artist_album_list_data.get("items"), list
+        ):
+            logger.error(
+                f"Could not fetch album list details for artist {artist_spotify_id} from Spotify using get_spotify_info('artist_discography'). Data: {artist_album_list_data}"
+            )
+            return jsonify(
+                {
+                    "error": f"Could not fetch sufficient details for artist {artist_spotify_id} to initiate watch."
+                }
+            ), 404
 
         # Attempt to extract artist name and verify ID
         # The actual artist name might be consistently found in the items, if they exist
-        artist_name_from_albums = "Unknown Artist" # Default
-        if artist_album_list_data['items']:
-            first_album = artist_album_list_data['items'][0]
-            if first_album and isinstance(first_album.get('artists'), list) and first_album['artists']:
+        artist_name_from_albums = "Unknown Artist"  # Default
+        if artist_album_list_data["items"]:
+            first_album = artist_album_list_data["items"][0]
+            if (
+                first_album
+                and isinstance(first_album.get("artists"), list)
+                and first_album["artists"]
+            ):
                 # Find the artist in the list that matches the artist_spotify_id
-                found_artist = next((art for art in first_album['artists'] if art.get('id') == artist_spotify_id), None)
-                if found_artist and found_artist.get('name'):
-                    artist_name_from_albums = found_artist['name']
-                elif first_album['artists'][0].get('name'): # Fallback to first artist if specific match not found or no ID
-                    artist_name_from_albums = first_album['artists'][0]['name']
-                    logger.warning(f"Could not find exact artist ID {artist_spotify_id} in first album's artists list. 
Using name '{artist_name_from_albums}'.") + found_artist = next( + ( + art + for art in first_album["artists"] + if art.get("id") == artist_spotify_id + ), + None, + ) + if found_artist and found_artist.get("name"): + artist_name_from_albums = found_artist["name"] + elif first_album["artists"][0].get( + "name" + ): # Fallback to first artist if specific match not found or no ID + artist_name_from_albums = first_album["artists"][0]["name"] + logger.warning( + f"Could not find exact artist ID {artist_spotify_id} in first album's artists list. Using name '{artist_name_from_albums}'." + ) else: - logger.warning(f"No album items found for artist {artist_spotify_id} to extract name. Using default.") + logger.warning( + f"No album items found for artist {artist_spotify_id} to extract name. Using default." + ) # Construct the artist_data object expected by add_artist_db # We use the provided artist_spotify_id as the primary ID. artist_data_for_db = { - "id": artist_spotify_id, # This is the crucial part + "id": artist_spotify_id, # This is the crucial part "name": artist_name_from_albums, - "albums": { # Mimic structure if add_artist_db expects it for total_albums - "total": artist_album_list_data.get('total', 0) - } + "albums": { # Mimic structure if add_artist_db expects it for total_albums + "total": artist_album_list_data.get("total", 0) + }, # Add any other fields add_artist_db might expect from a true artist object if necessary } - add_artist_db(artist_data_for_db) - - logger.info(f"Artist {artist_spotify_id} ('{artist_name_from_albums}') added to watchlist. Their albums will be processed by the watch manager.") - return jsonify({"message": f"Artist {artist_spotify_id} added to watchlist. Albums will be processed shortly."}), 201 + add_artist_db(artist_data_for_db) + + logger.info( + f"Artist {artist_spotify_id} ('{artist_name_from_albums}') added to watchlist. Their albums will be processed by the watch manager." + ) + return jsonify( + { + "message": f"Artist {artist_spotify_id} added to watchlist. Albums will be processed shortly." 
+                }
+            ), 201
     except Exception as e:
-        logger.error(f"Error adding artist {artist_spotify_id} to watchlist: {e}", exc_info=True)
+        logger.error(
+            f"Error adding artist {artist_spotify_id} to watchlist: {e}", exc_info=True
+        )
         return jsonify({"error": f"Could not add artist to watchlist: {str(e)}"}), 500
 
-@artist_bp.route('/watch/<artist_spotify_id>/status', methods=['GET'])
+
+@artist_bp.route("/watch/<artist_spotify_id>/status", methods=["GET"])
 def get_artist_watch_status(artist_spotify_id):
     """Checks if a specific artist is being watched."""
     logger.info(f"Checking watch status for artist {artist_spotify_id}.")
@@ -222,10 +261,14 @@ def get_artist_watch_status(artist_spotify_id):
     else:
         return jsonify({"is_watched": False}), 200
     except Exception as e:
-        logger.error(f"Error checking watch status for artist {artist_spotify_id}: {e}", exc_info=True)
+        logger.error(
+            f"Error checking watch status for artist {artist_spotify_id}: {e}",
+            exc_info=True,
+        )
         return jsonify({"error": f"Could not check watch status: {str(e)}"}), 500
 
-@artist_bp.route('/watch/<artist_spotify_id>', methods=['DELETE'])
+
+@artist_bp.route("/watch/<artist_spotify_id>", methods=["DELETE"])
 def remove_artist_from_watchlist(artist_spotify_id):
     """Removes an artist from the watchlist."""
     watch_config = get_watch_config()
@@ -235,16 +278,26 @@ def remove_artist_from_watchlist(artist_spotify_id):
     logger.info(f"Attempting to remove artist {artist_spotify_id} from watchlist.")
     try:
         if not get_watched_artist(artist_spotify_id):
-            return jsonify({"error": f"Artist {artist_spotify_id} not found in watchlist."}), 404
-        
+            return jsonify(
+                {"error": f"Artist {artist_spotify_id} not found in watchlist."}
+            ), 404
+
         remove_artist_db(artist_spotify_id)
         logger.info(f"Artist {artist_spotify_id} removed from watchlist successfully.")
-        return jsonify({"message": f"Artist {artist_spotify_id} removed from watchlist."}), 200
+        return jsonify(
+            {"message": f"Artist {artist_spotify_id} removed from watchlist."}
+        ), 200
     except Exception as e:
-        logger.error(f"Error removing artist {artist_spotify_id} from watchlist: {e}", exc_info=True)
-        return jsonify({"error": f"Could not remove artist from watchlist: {str(e)}"}), 500
+        logger.error(
+            f"Error removing artist {artist_spotify_id} from watchlist: {e}",
+            exc_info=True,
+        )
+        return jsonify(
+            {"error": f"Could not remove artist from watchlist: {str(e)}"}
+        ), 500
 
-@artist_bp.route('/watch/list', methods=['GET'])
+
+@artist_bp.route("/watch/list", methods=["GET"])
 def list_watched_artists_endpoint():
     """Lists all artists currently in the watchlist."""
     try:
@@ -254,101 +307,201 @@ def list_watched_artists_endpoint():
         logger.error(f"Error listing watched artists: {e}", exc_info=True)
         return jsonify({"error": f"Could not list watched artists: {str(e)}"}), 500
 
-@artist_bp.route('/watch/trigger_check', methods=['POST'])
+
+@artist_bp.route("/watch/trigger_check", methods=["POST"])
 def trigger_artist_check_endpoint():
     """Manually triggers the artist checking mechanism for all watched artists."""
     watch_config = get_watch_config()
     if not watch_config.get("enabled", False):
-        return jsonify({"error": "Watch feature is currently disabled globally. Cannot trigger check."}), 403
+        return jsonify(
+            {
+                "error": "Watch feature is currently disabled globally. Cannot trigger check."
+            }
+        ), 403
 
     logger.info("Manual trigger for artist check received for all artists.")
     try:
         thread = threading.Thread(target=check_watched_artists, args=(None,))
         thread.start()
-        return jsonify({"message": "Artist check triggered successfully in the background for all artists."}), 202
+        return jsonify(
+            {
+                "message": "Artist check triggered successfully in the background for all artists."
+            }
+        ), 202
     except Exception as e:
-        logger.error(f"Error manually triggering artist check for all: {e}", exc_info=True)
-        return jsonify({"error": f"Could not trigger artist check for all: {str(e)}"}), 500
+        logger.error(
+            f"Error manually triggering artist check for all: {e}", exc_info=True
+        )
+        return jsonify(
+            {"error": f"Could not trigger artist check for all: {str(e)}"}
+        ), 500
 
-@artist_bp.route('/watch/trigger_check/<artist_spotify_id>', methods=['POST'])
+
+@artist_bp.route("/watch/trigger_check/<artist_spotify_id>", methods=["POST"])
 def trigger_specific_artist_check_endpoint(artist_spotify_id: str):
     """Manually triggers the artist checking mechanism for a specific artist."""
     watch_config = get_watch_config()
     if not watch_config.get("enabled", False):
-        return jsonify({"error": "Watch feature is currently disabled globally. Cannot trigger check."}), 403
+        return jsonify(
+            {
+                "error": "Watch feature is currently disabled globally. Cannot trigger check."
+            }
+        ), 403
 
-    logger.info(f"Manual trigger for specific artist check received for ID: {artist_spotify_id}")
+    logger.info(
+        f"Manual trigger for specific artist check received for ID: {artist_spotify_id}"
+    )
     try:
         watched_artist = get_watched_artist(artist_spotify_id)
         if not watched_artist:
-            logger.warning(f"Trigger specific check: Artist ID {artist_spotify_id} not found in watchlist.")
-            return jsonify({"error": f"Artist {artist_spotify_id} is not in the watchlist. Add it first."}), 404
+            logger.warning(
+                f"Trigger specific check: Artist ID {artist_spotify_id} not found in watchlist."
+            )
+            return jsonify(
+                {
+                    "error": f"Artist {artist_spotify_id} is not in the watchlist. Add it first."
+                }
+            ), 404
 
-        thread = threading.Thread(target=check_watched_artists, args=(artist_spotify_id,))
+        thread = threading.Thread(
+            target=check_watched_artists, args=(artist_spotify_id,)
+        )
         thread.start()
-        logger.info(f"Artist check triggered in background for specific artist ID: {artist_spotify_id}")
-        return jsonify({"message": f"Artist check triggered successfully in the background for {artist_spotify_id}."}), 202
+        logger.info(
+            f"Artist check triggered in background for specific artist ID: {artist_spotify_id}"
+        )
+        return jsonify(
+            {
+                "message": f"Artist check triggered successfully in the background for {artist_spotify_id}."
+            }
+        ), 202
     except Exception as e:
-        logger.error(f"Error manually triggering specific artist check for {artist_spotify_id}: {e}", exc_info=True)
-        return jsonify({"error": f"Could not trigger artist check for {artist_spotify_id}: {str(e)}"}), 500
+        logger.error(
+            f"Error manually triggering specific artist check for {artist_spotify_id}: {e}",
+            exc_info=True,
+        )
+        return jsonify(
+            {
+                "error": f"Could not trigger artist check for {artist_spotify_id}: {str(e)}"
+            }
+        ), 500
 
-@artist_bp.route('/watch/<artist_spotify_id>/albums', methods=['POST'])
+
+@artist_bp.route("/watch/<artist_spotify_id>/albums", methods=["POST"])
 def mark_albums_as_known_for_artist(artist_spotify_id):
     """Fetches details for given album IDs and adds/updates them in the artist's local DB table."""
     watch_config = get_watch_config()
     if not watch_config.get("enabled", False):
-        return jsonify({"error": "Watch feature is currently disabled globally. Cannot mark albums."}), 403
+        return jsonify(
+            {
+                "error": "Watch feature is currently disabled globally. Cannot mark albums."
+            }
+        ), 403
 
     logger.info(f"Attempting to mark albums as known for artist {artist_spotify_id}.")
     try:
         album_ids = request.json
-        if not isinstance(album_ids, list) or not all(isinstance(aid, str) for aid in album_ids):
-            return jsonify({"error": "Invalid request body. Expecting a JSON array of album Spotify IDs."}), 400
-        
+        if not isinstance(album_ids, list) or not all(
+            isinstance(aid, str) for aid in album_ids
+        ):
+            return jsonify(
+                {
+                    "error": "Invalid request body. Expecting a JSON array of album Spotify IDs."
+                }
+            ), 400
+
         if not get_watched_artist(artist_spotify_id):
-            return jsonify({"error": f"Artist {artist_spotify_id} is not being watched."}), 404
+            return jsonify(
+                {"error": f"Artist {artist_spotify_id} is not being watched."}
+            ), 404
 
         fetched_albums_details = []
         for album_id in album_ids:
             try:
                 # We need full album details. get_spotify_info with type "album" should provide this.
-                album_detail = get_spotify_info(album_id, "album") 
+                album_detail = get_spotify_info(album_id, "album")
                 if album_detail and album_detail.get("id"):
                     fetched_albums_details.append(album_detail)
                 else:
-                    logger.warning(f"Could not fetch details for album {album_id} when marking as known for artist {artist_spotify_id}.")
+                    logger.warning(
+                        f"Could not fetch details for album {album_id} when marking as known for artist {artist_spotify_id}."
+                    )
             except Exception as e:
-                logger.error(f"Failed to fetch Spotify details for album {album_id}: {e}")
-        
-        if not fetched_albums_details:
-            return jsonify({"message": "No valid album details could be fetched to mark as known.", "processed_count": 0}), 200
+                logger.error(
+                    f"Failed to fetch Spotify details for album {album_id}: {e}"
+                )
 
-        processed_count = add_specific_albums_to_artist_table(artist_spotify_id, fetched_albums_details)
-        logger.info(f"Successfully marked/updated {processed_count} albums as known for artist {artist_spotify_id}.")
-        return jsonify({"message": f"Successfully processed {processed_count} albums for artist {artist_spotify_id}."}), 200
+        if not fetched_albums_details:
+            return jsonify(
+                {
+                    "message": "No valid album details could be fetched to mark as known.",
+                    "processed_count": 0,
+                }
+            ), 200
+
+        processed_count = add_specific_albums_to_artist_table(
+            artist_spotify_id, fetched_albums_details
+        )
+        logger.info(
+            f"Successfully marked/updated {processed_count} albums as known for artist {artist_spotify_id}."
+        )
+        return jsonify(
+            {
+                "message": f"Successfully processed {processed_count} albums for artist {artist_spotify_id}."
+            }
+        ), 200
     except Exception as e:
-        logger.error(f"Error marking albums as known for artist {artist_spotify_id}: {e}", exc_info=True)
+        logger.error(
+            f"Error marking albums as known for artist {artist_spotify_id}: {e}",
+            exc_info=True,
+        )
         return jsonify({"error": f"Could not mark albums as known: {str(e)}"}), 500
 
-@artist_bp.route('/watch/<artist_spotify_id>/albums', methods=['DELETE'])
+
+@artist_bp.route("/watch/<artist_spotify_id>/albums", methods=["DELETE"])
 def mark_albums_as_missing_locally_for_artist(artist_spotify_id):
     """Removes specified albums from the artist's local DB table."""
     watch_config = get_watch_config()
     if not watch_config.get("enabled", False):
-        return jsonify({"error": "Watch feature is currently disabled globally. Cannot mark albums."}), 403
+        return jsonify(
+            {
+                "error": "Watch feature is currently disabled globally. Cannot mark albums."
+            }
+        ), 403
 
-    logger.info(f"Attempting to mark albums as missing (delete locally) for artist {artist_spotify_id}.")
+    logger.info(
+        f"Attempting to mark albums as missing (delete locally) for artist {artist_spotify_id}."
+    )
     try:
         album_ids = request.json
-        if not isinstance(album_ids, list) or not all(isinstance(aid, str) for aid in album_ids):
-            return jsonify({"error": "Invalid request body. Expecting a JSON array of album Spotify IDs."}), 400
+        if not isinstance(album_ids, list) or not all(
+            isinstance(aid, str) for aid in album_ids
+        ):
+            return jsonify(
+                {
+                    "error": "Invalid request body. Expecting a JSON array of album Spotify IDs."
+                }
+            ), 400
 
         if not get_watched_artist(artist_spotify_id):
-            return jsonify({"error": f"Artist {artist_spotify_id} is not being watched."}), 404
+            return jsonify(
+                {"error": f"Artist {artist_spotify_id} is not being watched."}
+            ), 404
 
-        deleted_count = remove_specific_albums_from_artist_table(artist_spotify_id, album_ids)
-        logger.info(f"Successfully removed {deleted_count} albums locally for artist {artist_spotify_id}.")
-        return jsonify({"message": f"Successfully removed {deleted_count} albums locally for artist {artist_spotify_id}."}), 200
+        deleted_count = remove_specific_albums_from_artist_table(
+            artist_spotify_id, album_ids
+        )
+        logger.info(
+            f"Successfully removed {deleted_count} albums locally for artist {artist_spotify_id}."
+        )
+        return jsonify(
+            {
+                "message": f"Successfully removed {deleted_count} albums locally for artist {artist_spotify_id}."
+ } + ), 200 except Exception as e: - logger.error(f"Error marking albums as missing (deleting locally) for artist {artist_spotify_id}: {e}", exc_info=True) + logger.error( + f"Error marking albums as missing (deleting locally) for artist {artist_spotify_id}: {e}", + exc_info=True, + ) return jsonify({"error": f"Could not mark albums as missing: {str(e)}"}), 500 diff --git a/routes/config.py b/routes/config.py index a314ac5..19a8adf 100644 --- a/routes/config.py +++ b/routes/config.py @@ -1,79 +1,69 @@ from flask import Blueprint, jsonify, request import json -from pathlib import Path import logging -import threading -import time import os +from typing import Any # Import the centralized config getters that handle file creation and defaults -from routes.utils.celery_config import get_config_params as get_main_config_params, DEFAULT_MAIN_CONFIG, CONFIG_FILE_PATH as MAIN_CONFIG_FILE_PATH -from routes.utils.watch.manager import get_watch_config as get_watch_manager_config, DEFAULT_WATCH_CONFIG, CONFIG_FILE_PATH as WATCH_CONFIG_FILE_PATH +from routes.utils.celery_config import ( + get_config_params as get_main_config_params, + DEFAULT_MAIN_CONFIG, + CONFIG_FILE_PATH as MAIN_CONFIG_FILE_PATH, +) +from routes.utils.watch.manager import ( + get_watch_config as get_watch_manager_config, + DEFAULT_WATCH_CONFIG, + CONFIG_FILE_PATH as WATCH_CONFIG_FILE_PATH, +) logger = logging.getLogger(__name__) -config_bp = Blueprint('config', __name__) +config_bp = Blueprint("config", __name__) -# Path to main config file (consistent with celery_config.py) -# CONFIG_PATH = Path('./data/config/main.json') # Defined as MAIN_CONFIG_FILE_PATH from import -# Path to watch config file (consistent with watch/manager.py) -# WATCH_CONFIG_PATH = Path('./data/config/watch.json') # Defined as WATCH_CONFIG_FILE_PATH from import # Flag for config change notifications config_changed = False -last_config = {} +last_config: dict[str, Any] = {} # Define parameters that should trigger notification when changed NOTIFY_PARAMETERS = [ - 'maxConcurrentDownloads', - 'service', - 'fallback', - 'spotifyQuality', - 'deezerQuality' + "maxConcurrentDownloads", + "service", + "fallback", + "spotifyQuality", + "deezerQuality", ] + # Helper to get main config (uses the one from celery_config) def get_config(): """Retrieves the main configuration, creating it with defaults if necessary.""" return get_main_config_params() + # Helper to save main config def save_config(config_data): """Saves the main configuration data to main.json.""" try: MAIN_CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True) - # Ensure all default keys are present before saving, merging if necessary - current_defaults = DEFAULT_MAIN_CONFIG.copy() - # Overlay provided data on defaults to ensure all keys are there. - # This might not be ideal if user explicitly wants to remove a key, - # but for this setup, ensuring defaults is safer. - # A better approach for full PUT might be to replace entirely, - # but for ensuring defaults, this is okay. - # Let's assume config_data is what the user intends fully. - # We'll rely on get_config_params to have already populated defaults if the file was new. - # When saving, we should just save what's given, after ensuring it has necessary structure. 
- - # Merge with defaults to ensure all keys are present - # This ensures that if a user POSTs partial data, it's merged with existing/default structure - # Load current or default config existing_config = {} if MAIN_CONFIG_FILE_PATH.exists(): - with open(MAIN_CONFIG_FILE_PATH, 'r') as f_read: + with open(MAIN_CONFIG_FILE_PATH, "r") as f_read: existing_config = json.load(f_read) - else: # Should be rare if get_config_params was called + else: # Should be rare if get_config_params was called existing_config = DEFAULT_MAIN_CONFIG.copy() # Update with new data for key, value in config_data.items(): existing_config[key] = value - + # Ensure all default keys are still there for default_key, default_value in DEFAULT_MAIN_CONFIG.items(): if default_key not in existing_config: existing_config[default_key] = default_value - - with open(MAIN_CONFIG_FILE_PATH, 'w') as f: + + with open(MAIN_CONFIG_FILE_PATH, "w") as f: json.dump(existing_config, f, indent=4) logger.info(f"Main configuration saved to {MAIN_CONFIG_FILE_PATH}") return True, None @@ -81,33 +71,35 @@ def save_config(config_data): logger.error(f"Error saving main configuration: {e}", exc_info=True) return False, str(e) + # Helper to get watch config (uses the one from watch/manager.py) -def get_watch_config_http(): # Renamed to avoid conflict with the imported get_watch_config +def get_watch_config_http(): # Renamed to avoid conflict with the imported get_watch_config """Retrieves the watch configuration, creating it with defaults if necessary.""" return get_watch_manager_config() + # Helper to save watch config -def save_watch_config_http(watch_config_data): # Renamed +def save_watch_config_http(watch_config_data): # Renamed """Saves the watch configuration data to watch.json.""" try: WATCH_CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True) - + # Similar logic to save_config: merge with defaults/existing existing_config = {} if WATCH_CONFIG_FILE_PATH.exists(): - with open(WATCH_CONFIG_FILE_PATH, 'r') as f_read: + with open(WATCH_CONFIG_FILE_PATH, "r") as f_read: existing_config = json.load(f_read) - else: # Should be rare if get_watch_manager_config was called + else: # Should be rare if get_watch_manager_config was called existing_config = DEFAULT_WATCH_CONFIG.copy() for key, value in watch_config_data.items(): existing_config[key] = value - + for default_key, default_value in DEFAULT_WATCH_CONFIG.items(): if default_key not in existing_config: existing_config[default_key] = default_value - with open(WATCH_CONFIG_FILE_PATH, 'w') as f: + with open(WATCH_CONFIG_FILE_PATH, "w") as f: json.dump(existing_config, f, indent=4) logger.info(f"Watch configuration saved to {WATCH_CONFIG_FILE_PATH}") return True, None @@ -115,7 +107,8 @@ def save_watch_config_http(watch_config_data): # Renamed logger.error(f"Error saving watch configuration: {e}", exc_info=True) return False, str(e) -@config_bp.route('/config', methods=['GET']) + +@config_bp.route("/config", methods=["GET"]) def handle_config(): """Handles GET requests for the main configuration.""" try: @@ -123,9 +116,12 @@ def handle_config(): return jsonify(config) except Exception as e: logger.error(f"Error in GET /config: {e}", exc_info=True) - return jsonify({"error": "Failed to retrieve configuration", "details": str(e)}), 500 + return jsonify( + {"error": "Failed to retrieve configuration", "details": str(e)} + ), 500 -@config_bp.route('/config', methods=['POST', 'PUT']) + +@config_bp.route("/config", methods=["POST", "PUT"]) def update_config(): """Handles POST/PUT requests to update 
the main configuration.""" try: @@ -133,12 +129,9 @@ def update_config(): if not isinstance(new_config, dict): return jsonify({"error": "Invalid config format"}), 400 - # Get existing config to preserve environment-controlled values - existing_config = get_config() or {} - # Preserve the explicitFilter setting from environment - explicit_filter_env = os.environ.get('EXPLICIT_FILTER', 'false').lower() - new_config['explicitFilter'] = explicit_filter_env in ('true', '1', 'yes', 'on') + explicit_filter_env = os.environ.get("EXPLICIT_FILTER", "false").lower() + new_config["explicitFilter"] = explicit_filter_env in ("true", "1", "yes", "on") success, error_msg = save_config(new_config) if success: @@ -147,33 +140,42 @@ def update_config(): if updated_config_values is None: # This case should ideally not be reached if save_config succeeded # and get_config handles errors by returning a default or None. - return jsonify({"error": "Failed to retrieve configuration after saving"}), 500 - + return jsonify( + {"error": "Failed to retrieve configuration after saving"} + ), 500 + return jsonify(updated_config_values) else: - return jsonify({"error": "Failed to update configuration", "details": error_msg}), 500 + return jsonify( + {"error": "Failed to update configuration", "details": error_msg} + ), 500 except json.JSONDecodeError: return jsonify({"error": "Invalid JSON data"}), 400 except Exception as e: logger.error(f"Error in POST/PUT /config: {e}", exc_info=True) - return jsonify({"error": "Failed to update configuration", "details": str(e)}), 500 + return jsonify( + {"error": "Failed to update configuration", "details": str(e)} + ), 500 -@config_bp.route('/config/check', methods=['GET']) + +@config_bp.route("/config/check", methods=["GET"]) def check_config_changes(): # This endpoint seems more related to dynamically checking if config changed # on disk, which might not be necessary if settings are applied on restart # or by a dedicated manager. For now, just return current config. 
try: config = get_config() - return jsonify({ - "message": "Current configuration retrieved.", - "config": config - }) + return jsonify( + {"message": "Current configuration retrieved.", "config": config} + ) except Exception as e: logger.error(f"Error in GET /config/check: {e}", exc_info=True) - return jsonify({"error": "Failed to check configuration", "details": str(e)}), 500 + return jsonify( + {"error": "Failed to check configuration", "details": str(e)} + ), 500 -@config_bp.route('/config/watch', methods=['GET']) + +@config_bp.route("/config/watch", methods=["GET"]) def handle_watch_config(): """Handles GET requests for the watch configuration.""" try: @@ -181,9 +183,12 @@ def handle_watch_config(): return jsonify(watch_config) except Exception as e: logger.error(f"Error in GET /config/watch: {e}", exc_info=True) - return jsonify({"error": "Failed to retrieve watch configuration", "details": str(e)}), 500 + return jsonify( + {"error": "Failed to retrieve watch configuration", "details": str(e)} + ), 500 -@config_bp.route('/config/watch', methods=['POST', 'PUT']) + +@config_bp.route("/config/watch", methods=["POST", "PUT"]) def update_watch_config(): """Handles POST/PUT requests to update the watch configuration.""" try: @@ -195,9 +200,13 @@ def update_watch_config(): if success: return jsonify({"message": "Watch configuration updated successfully"}), 200 else: - return jsonify({"error": "Failed to update watch configuration", "details": error_msg}), 500 + return jsonify( + {"error": "Failed to update watch configuration", "details": error_msg} + ), 500 except json.JSONDecodeError: return jsonify({"error": "Invalid JSON data for watch config"}), 400 except Exception as e: logger.error(f"Error in POST/PUT /config/watch: {e}", exc_info=True) - return jsonify({"error": "Failed to update watch configuration", "details": str(e)}), 500 \ No newline at end of file + return jsonify( + {"error": "Failed to update watch configuration", "details": str(e)} + ), 500 diff --git a/routes/credentials.py b/routes/credentials.py index dd0cb02..dfc31f0 100755 --- a/routes/credentials.py +++ b/routes/credentials.py @@ -8,80 +8,99 @@ from routes.utils.credentials import ( init_credentials_db, # Import new utility functions for global Spotify API creds _get_global_spotify_api_creds, - save_global_spotify_api_creds + save_global_spotify_api_creds, ) -from pathlib import Path import logging logger = logging.getLogger(__name__) -credentials_bp = Blueprint('credentials', __name__) +credentials_bp = Blueprint("credentials", __name__) # Initialize the database and tables when the blueprint is loaded init_credentials_db() -@credentials_bp.route('/spotify_api_config', methods=['GET', 'PUT']) + +@credentials_bp.route("/spotify_api_config", methods=["GET", "PUT"]) def handle_spotify_api_config(): """Handles GET and PUT requests for the global Spotify API client_id and client_secret.""" try: - if request.method == 'GET': + if request.method == "GET": client_id, client_secret = _get_global_spotify_api_creds() if client_id is not None and client_secret is not None: - return jsonify({"client_id": client_id, "client_secret": client_secret}), 200 + return jsonify( + {"client_id": client_id, "client_secret": client_secret} + ), 200 else: # If search.json exists but is empty/incomplete, or doesn't exist - return jsonify({ - "warning": "Global Spotify API credentials are not fully configured or file is missing.", - "client_id": client_id or "", - "client_secret": client_secret or "" - }), 200 - - elif request.method == 'PUT': 
+            return jsonify(
+                {
+                    "warning": "Global Spotify API credentials are not fully configured or file is missing.",
+                    "client_id": client_id or "",
+                    "client_secret": client_secret or "",
+                }
+            ), 200
+
+        elif request.method == "PUT":
             data = request.get_json()
-            if not data or 'client_id' not in data or 'client_secret' not in data:
-                return jsonify({"error": "Request body must contain 'client_id' and 'client_secret'"}), 400
-            
-            client_id = data['client_id']
-            client_secret = data['client_secret']
+            if not data or "client_id" not in data or "client_secret" not in data:
+                return jsonify(
+                    {
+                        "error": "Request body must contain 'client_id' and 'client_secret'"
+                    }
+                ), 400
+
+            client_id = data["client_id"]
+            client_secret = data["client_secret"]
 
             if not isinstance(client_id, str) or not isinstance(client_secret, str):
-                return jsonify({"error": "'client_id' and 'client_secret' must be strings"}), 400
+                return jsonify(
+                    {"error": "'client_id' and 'client_secret' must be strings"}
+                ), 400
 
             if save_global_spotify_api_creds(client_id, client_secret):
-                return jsonify({"message": "Global Spotify API credentials updated successfully."}), 200
+                return jsonify(
+                    {"message": "Global Spotify API credentials updated successfully."}
+                ), 200
             else:
-                return jsonify({"error": "Failed to save global Spotify API credentials."}), 500
-            
+                return jsonify(
+                    {"error": "Failed to save global Spotify API credentials."}
+                ), 500
+
     except Exception as e:
         logger.error(f"Error in /spotify_api_config: {e}", exc_info=True)
         return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500
 
-@credentials_bp.route('/<service>', methods=['GET'])
+@credentials_bp.route("/<service>", methods=["GET"])
 def handle_list_credentials(service):
     try:
-        if service not in ['spotify', 'deezer']:
-            return jsonify({"error": "Invalid service. Must be 'spotify' or 'deezer'"}), 400
+        if service not in ["spotify", "deezer"]:
+            return jsonify(
+                {"error": "Invalid service. Must be 'spotify' or 'deezer'"}
+            ), 400
         return jsonify(list_credentials(service))
-    except ValueError as e: # Should not happen with service check above
+    except ValueError as e:  # Should not happen with service check above
        return jsonify({"error": str(e)}), 400
     except Exception as e:
         logger.error(f"Error listing credentials for {service}: {e}", exc_info=True)
         return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500
 
-@credentials_bp.route('/<service>/<name>', methods=['GET', 'POST', 'PUT', 'DELETE'])
+
+@credentials_bp.route("/<service>/<name>", methods=["GET", "POST", "PUT", "DELETE"])
 def handle_single_credential(service, name):
     try:
-        if service not in ['spotify', 'deezer']:
-            return jsonify({"error": "Invalid service. Must be 'spotify' or 'deezer'"}), 400
+        if service not in ["spotify", "deezer"]:
+            return jsonify(
+                {"error": "Invalid service. Must be 'spotify' or 'deezer'"}
+            ), 400
 
         # cred_type logic is removed for Spotify as API keys are global.
         # For Deezer, it's always 'credentials' type implicitly.
-        
-        if request.method == 'GET':
+
+        if request.method == "GET":
             # get_credential for Spotify now only returns region and blob_file_path
             return jsonify(get_credential(service, name))
-        
-        elif request.method == 'POST':
+
+        elif request.method == "POST":
             data = request.get_json()
             if not data:
                 return jsonify({"error": "Request body cannot be empty."}), 400
@@ -89,21 +108,36 @@ def handle_single_credential(service, name):
             # For Deezer, it expects 'arl' and 'region'
             # Validation is handled within create_credential utility function
             result = create_credential(service, name, data)
-            return jsonify({"message": f"Credential for '{name}' ({service}) created successfully.", "details": result}), 201
-        
-        elif request.method == 'PUT':
+            return jsonify(
+                {
+                    "message": f"Credential for '{name}' ({service}) created successfully.",
+                    "details": result,
+                }
+            ), 201
+
+        elif request.method == "PUT":
             data = request.get_json()
             if not data:
                 return jsonify({"error": "Request body cannot be empty."}), 400
             # edit_credential for Spotify now handles updates to 'region', 'blob_content'
             # For Deezer, 'arl', 'region'
             result = edit_credential(service, name, data)
-            return jsonify({"message": f"Credential for '{name}' ({service}) updated successfully.", "details": result})
-        
-        elif request.method == 'DELETE':
+            return jsonify(
+                {
+                    "message": f"Credential for '{name}' ({service}) updated successfully.",
+                    "details": result,
+                }
+            )
+
+        elif request.method == "DELETE":
             # delete_credential for Spotify also handles deleting the blob directory
             result = delete_credential(service, name)
-            return jsonify({"message": f"Credential for '{name}' ({service}) deleted successfully.", "details": result})
+            return jsonify(
+                {
+                    "message": f"Credential for '{name}' ({service}) deleted successfully.",
+                    "details": result,
+                }
+            )
 
     except (ValueError, FileNotFoundError, FileExistsError) as e:
         status_code = 400
@@ -117,18 +151,22 @@ def handle_single_credential(service, name):
         logger.error(f"Server error in /<{service}>/<{name}>: {e}", exc_info=True)
         return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500
 
+
 # The '/search/<service>/<name>' route is now obsolete for Spotify and has been removed.
 
-@credentials_bp.route('/all/<service>', methods=['GET'])
+
+@credentials_bp.route("/all/<service>", methods=["GET"])
 def handle_all_credentials(service):
     """Lists all credentials for a given service. For Spotify, API keys are global and not listed per account."""
     try:
-        if service not in ['spotify', 'deezer']:
-            return jsonify({"error": "Invalid service. Must be 'spotify' or 'deezer'"}), 400
-        
+        if service not in ["spotify", "deezer"]:
+            return jsonify(
+                {"error": "Invalid service. Must be 'spotify' or 'deezer'"}
+            ), 400
+
         credentials_list = []
-        account_names = list_credentials(service) # This lists names from DB
-        
+        account_names = list_credentials(service)  # This lists names from DB
+
         for name in account_names:
             try:
                 # get_credential for Spotify returns region and blob_file_path.
@@ -137,17 +175,28 @@ def handle_all_credentials(service):
                 # We don't add global Spotify API keys here as they are separate
                 credentials_list.append({"name": name, "details": account_data})
             except FileNotFoundError:
-                logger.warning(f"Credential name '{name}' listed for service '{service}' but not found by get_credential. Skipping.")
+                logger.warning(
+                    f"Credential name '{name}' listed for service '{service}' but not found by get_credential. Skipping."
+ ) except Exception as e_inner: - logger.error(f"Error fetching details for credential '{name}' ({service}): {e_inner}", exc_info=True) - credentials_list.append({"name": name, "error": f"Could not retrieve details: {str(e_inner)}"}) - + logger.error( + f"Error fetching details for credential '{name}' ({service}): {e_inner}", + exc_info=True, + ) + credentials_list.append( + { + "name": name, + "error": f"Could not retrieve details: {str(e_inner)}", + } + ) + return jsonify(credentials_list) except Exception as e: logger.error(f"Error in /all/{service}: {e}", exc_info=True) return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500 -@credentials_bp.route('/markets', methods=['GET']) + +@credentials_bp.route("/markets", methods=["GET"]) def handle_markets(): """ Returns a list of unique market regions for Deezer and Spotify accounts. @@ -157,30 +206,36 @@ def handle_markets(): spotify_regions = set() # Process Deezer accounts - deezer_account_names = list_credentials('deezer') + deezer_account_names = list_credentials("deezer") for name in deezer_account_names: try: - account_data = get_credential('deezer', name) - if account_data and 'region' in account_data and account_data['region']: - deezer_regions.add(account_data['region']) + account_data = get_credential("deezer", name) + if account_data and "region" in account_data and account_data["region"]: + deezer_regions.add(account_data["region"]) except Exception as e: - logger.warning(f"Could not retrieve region for deezer account {name}: {e}") + logger.warning( + f"Could not retrieve region for deezer account {name}: {e}" + ) # Process Spotify accounts - spotify_account_names = list_credentials('spotify') + spotify_account_names = list_credentials("spotify") for name in spotify_account_names: try: - account_data = get_credential('spotify', name) - if account_data and 'region' in account_data and account_data['region']: - spotify_regions.add(account_data['region']) + account_data = get_credential("spotify", name) + if account_data and "region" in account_data and account_data["region"]: + spotify_regions.add(account_data["region"]) except Exception as e: - logger.warning(f"Could not retrieve region for spotify account {name}: {e}") - - return jsonify({ - "deezer": sorted(list(deezer_regions)), - "spotify": sorted(list(spotify_regions)) - }), 200 + logger.warning( + f"Could not retrieve region for spotify account {name}: {e}" + ) + + return jsonify( + { + "deezer": sorted(list(deezer_regions)), + "spotify": sorted(list(spotify_regions)), + } + ), 200 except Exception as e: logger.error(f"Error in /markets: {e}", exc_info=True) - return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500 \ No newline at end of file + return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500 diff --git a/routes/history.py b/routes/history.py index 69ec7f8..4c2f238 100644 --- a/routes/history.py +++ b/routes/history.py @@ -3,40 +3,45 @@ from routes.utils.history_manager import get_history_entries import logging logger = logging.getLogger(__name__) -history_bp = Blueprint('history', __name__, url_prefix='/api/history') +history_bp = Blueprint("history", __name__, url_prefix="/api/history") -@history_bp.route('', methods=['GET']) + +@history_bp.route("", methods=["GET"]) def get_download_history(): """API endpoint to retrieve download history with pagination, sorting, and filtering.""" try: - limit = request.args.get('limit', 25, type=int) - offset = request.args.get('offset', 0, type=int) - sort_by = 
request.args.get('sort_by', 'timestamp_completed') - sort_order = request.args.get('sort_order', 'DESC') - + limit = request.args.get("limit", 25, type=int) + offset = request.args.get("offset", 0, type=int) + sort_by = request.args.get("sort_by", "timestamp_completed") + sort_order = request.args.get("sort_order", "DESC") + # Basic filtering example: filter by status_final or download_type filters = {} - status_filter = request.args.get('status_final') + status_filter = request.args.get("status_final") if status_filter: - filters['status_final'] = status_filter - - type_filter = request.args.get('download_type') + filters["status_final"] = status_filter + + type_filter = request.args.get("download_type") if type_filter: - filters['download_type'] = type_filter - + filters["download_type"] = type_filter + # Add more filters as needed, e.g., by item_name (would need LIKE for partial match) # search_term = request.args.get('search') # if search_term: # filters['item_name'] = f'%{search_term}%' # This would require LIKE in get_history_entries - entries, total_count = get_history_entries(limit, offset, sort_by, sort_order, filters) - - return jsonify({ - 'entries': entries, - 'total_count': total_count, - 'limit': limit, - 'offset': offset - }) + entries, total_count = get_history_entries( + limit, offset, sort_by, sort_order, filters + ) + + return jsonify( + { + "entries": entries, + "total_count": total_count, + "limit": limit, + "offset": offset, + } + ) except Exception as e: logger.error(f"Error in /api/history endpoint: {e}", exc_info=True) - return jsonify({"error": "Failed to retrieve download history"}), 500 \ No newline at end of file + return jsonify({"error": "Failed to retrieve download history"}), 500 diff --git a/routes/playlist.py b/routes/playlist.py index b7a7966..268b772 100755 --- a/routes/playlist.py +++ b/routes/playlist.py @@ -1,13 +1,16 @@ from flask import Blueprint, Response, request, jsonify -import os import json import traceback -import logging # Added logging import -import uuid # For generating error task IDs -import time # For timestamps +import logging # Added logging import +import uuid # For generating error task IDs +import time # For timestamps from routes.utils.celery_queue_manager import download_queue_manager -from routes.utils.celery_tasks import store_task_info, store_task_status, ProgressState # For error task creation -import threading # For playlist watch trigger +from routes.utils.celery_tasks import ( + store_task_info, + store_task_status, + ProgressState, +) # For error task creation +import threading # For playlist watch trigger # Imports from playlist_watch.py from routes.utils.watch.db import ( @@ -17,15 +20,19 @@ from routes.utils.watch.db import ( get_watched_playlists, add_specific_tracks_to_playlist_table, remove_specific_tracks_from_playlist_table, - is_track_in_playlist_db # Added import + is_track_in_playlist_db, # Added import ) -from routes.utils.get_info import get_spotify_info # Already used, but ensure it's here -from routes.utils.watch.manager import check_watched_playlists, get_watch_config # For manual trigger & config +from routes.utils.get_info import get_spotify_info # Already used, but ensure it's here +from routes.utils.watch.manager import ( + check_watched_playlists, + get_watch_config, +) # For manual trigger & config -logger = logging.getLogger(__name__) # Added logger initialization -playlist_bp = Blueprint('playlist', __name__, url_prefix='/api/playlist') +logger = logging.getLogger(__name__) # Added logger 
initialization
+playlist_bp = Blueprint("playlist", __name__, url_prefix="/api/playlist")
 
-@playlist_bp.route('/download/<playlist_id>', methods=['GET'])
+
+@playlist_bp.route("/download/<playlist_id>", methods=["GET"])
 def handle_download(playlist_id):
     # Retrieve essential parameters from the request.
     # name = request.args.get('name') # Removed
@@ -34,150 +41,171 @@ def handle_download(playlist_id):
 
     # Construct the URL from playlist_id
     url = f"https://open.spotify.com/playlist/{playlist_id}"
-    orig_params["original_url"] = request.url # Update original_url to the constructed one
+    orig_params["original_url"] = (
+        request.url
+    )  # Update original_url to the constructed one
 
     # Fetch metadata from Spotify
     try:
         playlist_info = get_spotify_info(playlist_id, "playlist")
-        if not playlist_info or not playlist_info.get('name') or not playlist_info.get('owner'):
+        if (
+            not playlist_info
+            or not playlist_info.get("name")
+            or not playlist_info.get("owner")
+        ):
             return Response(
-                json.dumps({"error": f"Could not retrieve metadata for playlist ID: {playlist_id}"}),
+                json.dumps(
+                    {
+                        "error": f"Could not retrieve metadata for playlist ID: {playlist_id}"
+                    }
+                ),
                 status=404,
-                mimetype='application/json'
+                mimetype="application/json",
             )
-        
-        name_from_spotify = playlist_info.get('name')
+
+        name_from_spotify = playlist_info.get("name")
         # Use owner's display_name as the 'artist' for playlists
-        owner_info = playlist_info.get('owner', {})
-        artist_from_spotify = owner_info.get('display_name', "Unknown Owner")
+        owner_info = playlist_info.get("owner", {})
+        artist_from_spotify = owner_info.get("display_name", "Unknown Owner")
     except Exception as e:
         return Response(
-            json.dumps({"error": f"Failed to fetch metadata for playlist {playlist_id}: {str(e)}"}),
+            json.dumps(
+                {
+                    "error": f"Failed to fetch metadata for playlist {playlist_id}: {str(e)}"
+                }
+            ),
             status=500,
-            mimetype='application/json'
+            mimetype="application/json",
         )
 
     # Validate required parameters
-    if not url: # This check might be redundant now but kept for safety
+    if not url:  # This check might be redundant now but kept for safety
         return Response(
             json.dumps({"error": "Missing required parameter: url"}),
-            status=400,
-            mimetype='application/json'
+            status=400,
+            mimetype="application/json",
         )
-    
+
     try:
-        task_id = download_queue_manager.add_task({
-            "download_type": "playlist",
-            "url": url,
-            "name": name_from_spotify, # Use fetched name
-            "artist": artist_from_spotify, # Use fetched owner name as artist
-            "orig_request": orig_params
-        })
+        task_id = download_queue_manager.add_task(
+            {
+                "download_type": "playlist",
+                "url": url,
+                "name": name_from_spotify,  # Use fetched name
+                "artist": artist_from_spotify,  # Use fetched owner name as artist
+                "orig_request": orig_params,
+            }
+        )
     # Removed DuplicateDownloadError handling, add_task now manages this by creating an error task.
except Exception as e: # Generic error handling for other issues during task submission error_task_id = str(uuid.uuid4()) - store_task_info(error_task_id, { - "download_type": "playlist", - "url": url, - "name": name_from_spotify, # Use fetched name - "artist": artist_from_spotify, # Use fetched owner name as artist - "original_request": orig_params, - "created_at": time.time(), - "is_submission_error_task": True - }) - store_task_status(error_task_id, { - "status": ProgressState.ERROR, - "error": f"Failed to queue playlist download: {str(e)}", - "timestamp": time.time() - }) - return Response( - json.dumps({"error": f"Failed to queue playlist download: {str(e)}", "task_id": error_task_id}), - status=500, - mimetype='application/json' + store_task_info( + error_task_id, + { + "download_type": "playlist", + "url": url, + "name": name_from_spotify, # Use fetched name + "artist": artist_from_spotify, # Use fetched owner name as artist + "original_request": orig_params, + "created_at": time.time(), + "is_submission_error_task": True, + }, ) - + store_task_status( + error_task_id, + { + "status": ProgressState.ERROR, + "error": f"Failed to queue playlist download: {str(e)}", + "timestamp": time.time(), + }, + ) + return Response( + json.dumps( + { + "error": f"Failed to queue playlist download: {str(e)}", + "task_id": error_task_id, + } + ), + status=500, + mimetype="application/json", + ) + return Response( - json.dumps({"prg_file": task_id}), # prg_file is the old name for task_id + json.dumps({"prg_file": task_id}), # prg_file is the old name for task_id status=202, - mimetype='application/json' + mimetype="application/json", ) -@playlist_bp.route('/download/cancel', methods=['GET']) + +@playlist_bp.route("/download/cancel", methods=["GET"]) def cancel_download(): """ Cancel a running playlist download process by its prg file name. """ - prg_file = request.args.get('prg_file') + prg_file = request.args.get("prg_file") if not prg_file: return Response( json.dumps({"error": "Missing process id (prg_file) parameter"}), status=400, - mimetype='application/json' + mimetype="application/json", ) - + # Use the queue manager's cancellation method. result = download_queue_manager.cancel_task(prg_file) status_code = 200 if result.get("status") == "cancelled" else 404 - - return Response( - json.dumps(result), - status=status_code, - mimetype='application/json' - ) -@playlist_bp.route('/info', methods=['GET']) + return Response(json.dumps(result), status=status_code, mimetype="application/json") + + +@playlist_bp.route("/info", methods=["GET"]) def get_playlist_info(): """ Retrieve Spotify playlist metadata given a Spotify playlist ID. Expects a query parameter 'id' that contains the Spotify playlist ID. """ - spotify_id = request.args.get('id') - + spotify_id = request.args.get("id") + if not spotify_id: return Response( json.dumps({"error": "Missing parameter: id"}), status=400, - mimetype='application/json' + mimetype="application/json", ) - + try: # Import and use the get_spotify_info function from the utility module. 
playlist_info = get_spotify_info(spotify_id, "playlist")
-        
+
         # If playlist_info is successfully fetched, check if it's watched
         # and augment track items with is_locally_known status
-        if playlist_info and playlist_info.get('id'):
-            watched_playlist_details = get_watched_playlist(playlist_info['id'])
-            if watched_playlist_details: # Playlist is being watched
-                if playlist_info.get('tracks') and playlist_info['tracks'].get('items'):
-                    for item in playlist_info['tracks']['items']:
-                        if item and item.get('track') and item['track'].get('id'):
-                            track_id = item['track']['id']
-                            item['track']['is_locally_known'] = is_track_in_playlist_db(playlist_info['id'], track_id)
-                        elif item and item.get('track'): # Track object exists but no ID
-                            item['track']['is_locally_known'] = False
+        if playlist_info and playlist_info.get("id"):
+            watched_playlist_details = get_watched_playlist(playlist_info["id"])
+            if watched_playlist_details:  # Playlist is being watched
+                if playlist_info.get("tracks") and playlist_info["tracks"].get("items"):
+                    for item in playlist_info["tracks"]["items"]:
+                        if item and item.get("track") and item["track"].get("id"):
+                            track_id = item["track"]["id"]
+                            item["track"]["is_locally_known"] = is_track_in_playlist_db(
+                                playlist_info["id"], track_id
+                            )
+                        elif item and item.get(
+                            "track"
+                        ):  # Track object exists but no ID
+                            item["track"]["is_locally_known"] = False
         # If not watched, or no tracks, is_locally_known will not be added, or tracks won't exist to add it to.
         # Frontend should handle absence of this key as false.
 
         return Response(
-            json.dumps(playlist_info),
-            status=200,
-            mimetype='application/json'
+            json.dumps(playlist_info), status=200, mimetype="application/json"
         )
     except Exception as e:
-        error_data = {
-            "error": str(e),
-            "traceback": traceback.format_exc()
-        }
-        return Response(
-            json.dumps(error_data),
-            status=500,
-            mimetype='application/json'
-        )
+        error_data = {"error": str(e), "traceback": traceback.format_exc()}
+        return Response(json.dumps(error_data), status=500, mimetype="application/json")
 
-@playlist_bp.route('/watch/<playlist_spotify_id>', methods=['PUT'])
+
+@playlist_bp.route("/watch/<playlist_spotify_id>", methods=["PUT"])
 def add_to_watchlist(playlist_spotify_id):
     """Adds a playlist to the watchlist."""
     watch_config = get_watch_config()
@@ -188,15 +216,23 @@ def add_to_watchlist(playlist_spotify_id):
     try:
         # Check if already watched
         if get_watched_playlist(playlist_spotify_id):
-            return jsonify({"message": f"Playlist {playlist_spotify_id} is already being watched."}), 200
+            return jsonify(
+                {"message": f"Playlist {playlist_spotify_id} is already being watched."}
+            ), 200
 
         # Fetch playlist details from Spotify to populate our DB
         playlist_data = get_spotify_info(playlist_spotify_id, "playlist")
-        if not playlist_data or 'id' not in playlist_data:
-            logger.error(f"Could not fetch details for playlist {playlist_spotify_id} from Spotify.")
-            return jsonify({"error": f"Could not fetch details for playlist {playlist_spotify_id} from Spotify."}), 404
+        if not playlist_data or "id" not in playlist_data:
+            logger.error(
+                f"Could not fetch details for playlist {playlist_spotify_id} from Spotify."
+            )
+            return jsonify(
+                {
+                    "error": f"Could not fetch details for playlist {playlist_spotify_id} from Spotify."
+                }
+            ), 404
 
-        add_playlist_db(playlist_data) # This also creates the tracks table
+        add_playlist_db(playlist_data)  # This also creates the tracks table
 
         # REMOVED: Do not add initial tracks directly to DB.
         # The playlist watch manager will pick them up as new and queue downloads.
@@ -205,14 +241,24 @@ def add_to_watchlist(playlist_spotify_id): # if initial_track_items: # from routes.utils.watch.db import add_tracks_to_playlist_db # Keep local import for clarity # add_tracks_to_playlist_db(playlist_spotify_id, initial_track_items) - - logger.info(f"Playlist {playlist_spotify_id} added to watchlist. Its tracks will be processed by the watch manager.") - return jsonify({"message": f"Playlist {playlist_spotify_id} added to watchlist. Tracks will be processed shortly."}), 201 + + logger.info( + f"Playlist {playlist_spotify_id} added to watchlist. Its tracks will be processed by the watch manager." + ) + return jsonify( + { + "message": f"Playlist {playlist_spotify_id} added to watchlist. Tracks will be processed shortly." + } + ), 201 except Exception as e: - logger.error(f"Error adding playlist {playlist_spotify_id} to watchlist: {e}", exc_info=True) + logger.error( + f"Error adding playlist {playlist_spotify_id} to watchlist: {e}", + exc_info=True, + ) return jsonify({"error": f"Could not add playlist to watchlist: {str(e)}"}), 500 -@playlist_bp.route('/watch//status', methods=['GET']) + +@playlist_bp.route("/watch//status", methods=["GET"]) def get_playlist_watch_status(playlist_spotify_id): """Checks if a specific playlist is being watched.""" logger.info(f"Checking watch status for playlist {playlist_spotify_id}.") @@ -225,10 +271,14 @@ def get_playlist_watch_status(playlist_spotify_id): # between "not watched" and an actual error fetching status. return jsonify({"is_watched": False}), 200 except Exception as e: - logger.error(f"Error checking watch status for playlist {playlist_spotify_id}: {e}", exc_info=True) + logger.error( + f"Error checking watch status for playlist {playlist_spotify_id}: {e}", + exc_info=True, + ) return jsonify({"error": f"Could not check watch status: {str(e)}"}), 500 -@playlist_bp.route('/watch/', methods=['DELETE']) + +@playlist_bp.route("/watch/", methods=["DELETE"]) def remove_from_watchlist(playlist_spotify_id): """Removes a playlist from the watchlist.""" watch_config = get_watch_config() @@ -238,76 +288,149 @@ def remove_from_watchlist(playlist_spotify_id): logger.info(f"Attempting to remove playlist {playlist_spotify_id} from watchlist.") try: if not get_watched_playlist(playlist_spotify_id): - return jsonify({"error": f"Playlist {playlist_spotify_id} not found in watchlist."}), 404 - - remove_playlist_db(playlist_spotify_id) - logger.info(f"Playlist {playlist_spotify_id} removed from watchlist successfully.") - return jsonify({"message": f"Playlist {playlist_spotify_id} removed from watchlist."}), 200 - except Exception as e: - logger.error(f"Error removing playlist {playlist_spotify_id} from watchlist: {e}", exc_info=True) - return jsonify({"error": f"Could not remove playlist from watchlist: {str(e)}"}), 500 + return jsonify( + {"error": f"Playlist {playlist_spotify_id} not found in watchlist."} + ), 404 -@playlist_bp.route('/watch//tracks', methods=['POST']) + remove_playlist_db(playlist_spotify_id) + logger.info( + f"Playlist {playlist_spotify_id} removed from watchlist successfully." 
+ ) + return jsonify( + {"message": f"Playlist {playlist_spotify_id} removed from watchlist."} + ), 200 + except Exception as e: + logger.error( + f"Error removing playlist {playlist_spotify_id} from watchlist: {e}", + exc_info=True, + ) + return jsonify( + {"error": f"Could not remove playlist from watchlist: {str(e)}"} + ), 500 + + +@playlist_bp.route("/watch//tracks", methods=["POST"]) def mark_tracks_as_known(playlist_spotify_id): """Fetches details for given track IDs and adds/updates them in the playlist's local DB table.""" watch_config = get_watch_config() if not watch_config.get("enabled", False): - return jsonify({"error": "Watch feature is currently disabled globally. Cannot mark tracks."}), 403 + return jsonify( + { + "error": "Watch feature is currently disabled globally. Cannot mark tracks." + } + ), 403 - logger.info(f"Attempting to mark tracks as known for playlist {playlist_spotify_id}.") + logger.info( + f"Attempting to mark tracks as known for playlist {playlist_spotify_id}." + ) try: track_ids = request.json - if not isinstance(track_ids, list) or not all(isinstance(tid, str) for tid in track_ids): - return jsonify({"error": "Invalid request body. Expecting a JSON array of track Spotify IDs."}), 400 - + if not isinstance(track_ids, list) or not all( + isinstance(tid, str) for tid in track_ids + ): + return jsonify( + { + "error": "Invalid request body. Expecting a JSON array of track Spotify IDs." + } + ), 400 + if not get_watched_playlist(playlist_spotify_id): - return jsonify({"error": f"Playlist {playlist_spotify_id} is not being watched."}), 404 + return jsonify( + {"error": f"Playlist {playlist_spotify_id} is not being watched."} + ), 404 fetched_tracks_details = [] for track_id in track_ids: try: track_detail = get_spotify_info(track_id, "track") - if track_detail and track_detail.get('id'): + if track_detail and track_detail.get("id"): fetched_tracks_details.append(track_detail) else: - logger.warning(f"Could not fetch details for track {track_id} when marking as known for playlist {playlist_spotify_id}.") + logger.warning( + f"Could not fetch details for track {track_id} when marking as known for playlist {playlist_spotify_id}." + ) except Exception as e: - logger.error(f"Failed to fetch Spotify details for track {track_id}: {e}") - - if not fetched_tracks_details: - return jsonify({"message": "No valid track details could be fetched to mark as known.", "processed_count": 0}), 200 + logger.error( + f"Failed to fetch Spotify details for track {track_id}: {e}" + ) - add_specific_tracks_to_playlist_table(playlist_spotify_id, fetched_tracks_details) - logger.info(f"Successfully marked/updated {len(fetched_tracks_details)} tracks as known for playlist {playlist_spotify_id}.") - return jsonify({"message": f"Successfully processed {len(fetched_tracks_details)} tracks for playlist {playlist_spotify_id}."}), 200 + if not fetched_tracks_details: + return jsonify( + { + "message": "No valid track details could be fetched to mark as known.", + "processed_count": 0, + } + ), 200 + + add_specific_tracks_to_playlist_table( + playlist_spotify_id, fetched_tracks_details + ) + logger.info( + f"Successfully marked/updated {len(fetched_tracks_details)} tracks as known for playlist {playlist_spotify_id}." + ) + return jsonify( + { + "message": f"Successfully processed {len(fetched_tracks_details)} tracks for playlist {playlist_spotify_id}." 
+ } + ), 200 except Exception as e: - logger.error(f"Error marking tracks as known for playlist {playlist_spotify_id}: {e}", exc_info=True) + logger.error( + f"Error marking tracks as known for playlist {playlist_spotify_id}: {e}", + exc_info=True, + ) return jsonify({"error": f"Could not mark tracks as known: {str(e)}"}), 500 -@playlist_bp.route('/watch//tracks', methods=['DELETE']) + +@playlist_bp.route("/watch//tracks", methods=["DELETE"]) def mark_tracks_as_missing_locally(playlist_spotify_id): """Removes specified tracks from the playlist's local DB table.""" watch_config = get_watch_config() if not watch_config.get("enabled", False): - return jsonify({"error": "Watch feature is currently disabled globally. Cannot mark tracks."}), 403 + return jsonify( + { + "error": "Watch feature is currently disabled globally. Cannot mark tracks." + } + ), 403 - logger.info(f"Attempting to mark tracks as missing (remove locally) for playlist {playlist_spotify_id}.") + logger.info( + f"Attempting to mark tracks as missing (remove locally) for playlist {playlist_spotify_id}." + ) try: track_ids = request.json - if not isinstance(track_ids, list) or not all(isinstance(tid, str) for tid in track_ids): - return jsonify({"error": "Invalid request body. Expecting a JSON array of track Spotify IDs."}), 400 + if not isinstance(track_ids, list) or not all( + isinstance(tid, str) for tid in track_ids + ): + return jsonify( + { + "error": "Invalid request body. Expecting a JSON array of track Spotify IDs." + } + ), 400 if not get_watched_playlist(playlist_spotify_id): - return jsonify({"error": f"Playlist {playlist_spotify_id} is not being watched."}), 404 + return jsonify( + {"error": f"Playlist {playlist_spotify_id} is not being watched."} + ), 404 - deleted_count = remove_specific_tracks_from_playlist_table(playlist_spotify_id, track_ids) - logger.info(f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}.") - return jsonify({"message": f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}."}), 200 + deleted_count = remove_specific_tracks_from_playlist_table( + playlist_spotify_id, track_ids + ) + logger.info( + f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}." + ) + return jsonify( + { + "message": f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}." 
+ } + ), 200 except Exception as e: - logger.error(f"Error marking tracks as missing (deleting locally) for playlist {playlist_spotify_id}: {e}", exc_info=True) + logger.error( + f"Error marking tracks as missing (deleting locally) for playlist {playlist_spotify_id}: {e}", + exc_info=True, + ) return jsonify({"error": f"Could not mark tracks as missing: {str(e)}"}), 500 -@playlist_bp.route('/watch/list', methods=['GET']) + +@playlist_bp.route("/watch/list", methods=["GET"]) def list_watched_playlists_endpoint(): """Lists all playlists currently in the watchlist.""" try: @@ -317,43 +440,86 @@ def list_watched_playlists_endpoint(): logger.error(f"Error listing watched playlists: {e}", exc_info=True) return jsonify({"error": f"Could not list watched playlists: {str(e)}"}), 500 -@playlist_bp.route('/watch/trigger_check', methods=['POST']) + +@playlist_bp.route("/watch/trigger_check", methods=["POST"]) def trigger_playlist_check_endpoint(): """Manually triggers the playlist checking mechanism for all watched playlists.""" watch_config = get_watch_config() if not watch_config.get("enabled", False): - return jsonify({"error": "Watch feature is currently disabled globally. Cannot trigger check."}), 403 + return jsonify( + { + "error": "Watch feature is currently disabled globally. Cannot trigger check." + } + ), 403 logger.info("Manual trigger for playlist check received for all playlists.") try: # Run check_watched_playlists without an ID to check all thread = threading.Thread(target=check_watched_playlists, args=(None,)) thread.start() - return jsonify({"message": "Playlist check triggered successfully in the background for all playlists."}), 202 + return jsonify( + { + "message": "Playlist check triggered successfully in the background for all playlists." + } + ), 202 except Exception as e: - logger.error(f"Error manually triggering playlist check for all: {e}", exc_info=True) - return jsonify({"error": f"Could not trigger playlist check for all: {str(e)}"}), 500 + logger.error( + f"Error manually triggering playlist check for all: {e}", exc_info=True + ) + return jsonify( + {"error": f"Could not trigger playlist check for all: {str(e)}"} + ), 500 -@playlist_bp.route('/watch/trigger_check/', methods=['POST']) + +@playlist_bp.route( + "/watch/trigger_check/", methods=["POST"] +) def trigger_specific_playlist_check_endpoint(playlist_spotify_id: str): """Manually triggers the playlist checking mechanism for a specific playlist.""" watch_config = get_watch_config() if not watch_config.get("enabled", False): - return jsonify({"error": "Watch feature is currently disabled globally. Cannot trigger check."}), 403 + return jsonify( + { + "error": "Watch feature is currently disabled globally. Cannot trigger check." + } + ), 403 - logger.info(f"Manual trigger for specific playlist check received for ID: {playlist_spotify_id}") + logger.info( + f"Manual trigger for specific playlist check received for ID: {playlist_spotify_id}" + ) try: # Check if the playlist is actually in the watchlist first watched_playlist = get_watched_playlist(playlist_spotify_id) if not watched_playlist: - logger.warning(f"Trigger specific check: Playlist ID {playlist_spotify_id} not found in watchlist.") - return jsonify({"error": f"Playlist {playlist_spotify_id} is not in the watchlist. Add it first."}), 404 + logger.warning( + f"Trigger specific check: Playlist ID {playlist_spotify_id} not found in watchlist." + ) + return jsonify( + { + "error": f"Playlist {playlist_spotify_id} is not in the watchlist. Add it first." 
+ } + ), 404 # Run check_watched_playlists with the specific ID - thread = threading.Thread(target=check_watched_playlists, args=(playlist_spotify_id,)) + thread = threading.Thread( + target=check_watched_playlists, args=(playlist_spotify_id,) + ) thread.start() - logger.info(f"Playlist check triggered in background for specific playlist ID: {playlist_spotify_id}") - return jsonify({"message": f"Playlist check triggered successfully in the background for {playlist_spotify_id}."}), 202 + logger.info( + f"Playlist check triggered in background for specific playlist ID: {playlist_spotify_id}" + ) + return jsonify( + { + "message": f"Playlist check triggered successfully in the background for {playlist_spotify_id}." + } + ), 202 except Exception as e: - logger.error(f"Error manually triggering specific playlist check for {playlist_spotify_id}: {e}", exc_info=True) - return jsonify({"error": f"Could not trigger playlist check for {playlist_spotify_id}: {str(e)}"}), 500 + logger.error( + f"Error manually triggering specific playlist check for {playlist_spotify_id}: {e}", + exc_info=True, + ) + return jsonify( + { + "error": f"Could not trigger playlist check for {playlist_spotify_id}: {str(e)}" + } + ), 500 diff --git a/routes/prgs.py b/routes/prgs.py index 330e0b0..5795ee8 100755 --- a/routes/prgs.py +++ b/routes/prgs.py @@ -1,6 +1,4 @@ -from flask import Blueprint, abort, jsonify, Response, stream_with_context, request -import os -import json +from flask import Blueprint, abort, jsonify, request import logging import time @@ -11,26 +9,26 @@ from routes.utils.celery_tasks import ( get_all_tasks, cancel_task, retry_task, - ProgressState, - redis_client + redis_client, ) # Configure logging logger = logging.getLogger(__name__) -prgs_bp = Blueprint('prgs', __name__, url_prefix='/api/prgs') +prgs_bp = Blueprint("prgs", __name__, url_prefix="/api/prgs") # (Old .prg file system removed. Using new task system only.) -@prgs_bp.route('/', methods=['GET']) + +@prgs_bp.route("/", methods=["GET"]) def get_prg_file(task_id): """ Return a JSON object with the resource type, its name (title), the last progress update, and, if available, the original request parameters. - + This function works with both the old PRG file system (for backward compatibility) and the new task ID based system. - + Args: task_id: Either a task UUID from Celery or a PRG filename from the old system """ @@ -49,20 +47,31 @@ def get_prg_file(task_id): if download_type and item_url: try: # Extract the ID from the item_url (last part of the path) - item_id = item_url.split('/')[-1] + item_id = item_url.split("/")[-1] if item_id: # Ensure item_id is not empty - base_url = request.host_url.rstrip('/') - dynamic_original_url = f"{base_url}/api/{download_type}/download/{item_id}" + base_url = request.host_url.rstrip("/") + dynamic_original_url = ( + f"{base_url}/api/{download_type}/download/{item_id}" + ) else: - logger.warning(f"Could not extract item ID from URL: {item_url} for task {task_id}. Falling back for original_url.") + logger.warning( + f"Could not extract item ID from URL: {item_url} for task {task_id}. Falling back for original_url." 
+                    )
                 original_request_obj = task_info.get("original_request", {})
                 dynamic_original_url = original_request_obj.get("original_url", "")
         except Exception as e:
-            logger.error(f"Error constructing dynamic original_url for task {task_id}: {e}", exc_info=True)
+            logger.error(
+                f"Error constructing dynamic original_url for task {task_id}: {e}",
+                exc_info=True,
+            )
             original_request_obj = task_info.get("original_request", {})
-            dynamic_original_url = original_request_obj.get("original_url", "") # Fallback on any error
+            dynamic_original_url = original_request_obj.get(
+                "original_url", ""
+            )  # Fallback on any error
     else:
-        logger.warning(f"Missing download_type ('{download_type}') or item_url ('{item_url}') in task_info for task {task_id}. Falling back for original_url.")
+        logger.warning(
+            f"Missing download_type ('{download_type}') or item_url ('{item_url}') in task_info for task {task_id}. Falling back for original_url."
+        )
         original_request_obj = task_info.get("original_request", {})
         dynamic_original_url = original_request_obj.get("original_url", "")

@@ -73,17 +82,17 @@ def get_prg_file(task_id):
         "last_line": last_status,
         "timestamp": time.time(),
         "task_id": task_id,
-        "status_count": status_count
+        "status_count": status_count,
     }

     return jsonify(response)


-@prgs_bp.route('/delete/<task_id>', methods=['DELETE'])
+@prgs_bp.route("/delete/<task_id>", methods=["DELETE"])
 def delete_prg_file(task_id):
     """
     Delete a task's information and history.
     Works with both the old PRG file system and the new task ID based system.
-    
+
     Args:
         task_id: Either a task UUID from Celery or a PRG filename from the old system
     """
@@ -92,114 +101,138 @@ def delete_prg_file(task_id):
     task_info = get_task_info(task_id)
     if not task_info:
         abort(404, "Task not found")
     cancel_task(task_id)
-    from routes.utils.celery_tasks import redis_client
     redis_client.delete(f"task:{task_id}:info")
     redis_client.delete(f"task:{task_id}:status")
-    return {'message': f'Task {task_id} deleted successfully'}, 200
+    return {"message": f"Task {task_id} deleted successfully"}, 200


-@prgs_bp.route('/list', methods=['GET'])
+@prgs_bp.route("/list", methods=["GET"])
 def list_prg_files():
     """
     Retrieve a list of all tasks in the system.
     Returns a detailed list of task objects including status and metadata.
""" try: - tasks = get_all_tasks() # This already gets summary data + tasks = get_all_tasks() # This already gets summary data detailed_tasks = [] for task_summary in tasks: task_id = task_summary.get("task_id") if not task_id: continue - + task_info = get_task_info(task_id) last_status = get_last_task_status(task_id) - + if task_info and last_status: - detailed_tasks.append({ - "task_id": task_id, - "type": task_info.get("type", task_summary.get("type", "unknown")), - "name": task_info.get("name", task_summary.get("name", "Unknown")), - "artist": task_info.get("artist", task_summary.get("artist", "")), - "download_type": task_info.get("download_type", task_summary.get("download_type", "unknown")), - "status": last_status.get("status", "unknown"), # Keep summary status for quick access - "last_status_obj": last_status, # Full last status object - "original_request": task_info.get("original_request", {}), - "created_at": task_info.get("created_at", 0), - "timestamp": last_status.get("timestamp", task_info.get("created_at", 0)) - }) - elif task_info: # If last_status is somehow missing, still provide some info - detailed_tasks.append({ - "task_id": task_id, - "type": task_info.get("type", "unknown"), - "name": task_info.get("name", "Unknown"), - "artist": task_info.get("artist", ""), - "download_type": task_info.get("download_type", "unknown"), - "status": "unknown", - "last_status_obj": None, - "original_request": task_info.get("original_request", {}), - "created_at": task_info.get("created_at", 0), - "timestamp": task_info.get("created_at", 0) - }) + detailed_tasks.append( + { + "task_id": task_id, + "type": task_info.get( + "type", task_summary.get("type", "unknown") + ), + "name": task_info.get( + "name", task_summary.get("name", "Unknown") + ), + "artist": task_info.get( + "artist", task_summary.get("artist", "") + ), + "download_type": task_info.get( + "download_type", + task_summary.get("download_type", "unknown"), + ), + "status": last_status.get( + "status", "unknown" + ), # Keep summary status for quick access + "last_status_obj": last_status, # Full last status object + "original_request": task_info.get("original_request", {}), + "created_at": task_info.get("created_at", 0), + "timestamp": last_status.get( + "timestamp", task_info.get("created_at", 0) + ), + } + ) + elif ( + task_info + ): # If last_status is somehow missing, still provide some info + detailed_tasks.append( + { + "task_id": task_id, + "type": task_info.get("type", "unknown"), + "name": task_info.get("name", "Unknown"), + "artist": task_info.get("artist", ""), + "download_type": task_info.get("download_type", "unknown"), + "status": "unknown", + "last_status_obj": None, + "original_request": task_info.get("original_request", {}), + "created_at": task_info.get("created_at", 0), + "timestamp": task_info.get("created_at", 0), + } + ) # Sort tasks by creation time (newest first, or by timestamp if creation time is missing) - detailed_tasks.sort(key=lambda x: x.get('timestamp', x.get('created_at', 0)), reverse=True) - + detailed_tasks.sort( + key=lambda x: x.get("timestamp", x.get("created_at", 0)), reverse=True + ) + return jsonify(detailed_tasks) except Exception as e: logger.error(f"Error in /api/prgs/list: {e}", exc_info=True) return jsonify({"error": "Failed to retrieve task list"}), 500 -@prgs_bp.route('/retry/', methods=['POST']) +@prgs_bp.route("/retry/", methods=["POST"]) def retry_task_endpoint(task_id): """ Retry a failed task. 
- + Args: task_id: The ID of the task to retry """ try: # First check if this is a task ID in the new system task_info = get_task_info(task_id) - + if task_info: # This is a task ID in the new system result = retry_task(task_id) return jsonify(result) - + # If not found in new system, we need to handle the old system retry # For now, return an error as we're transitioning to the new system - return jsonify({ - "status": "error", - "message": "Retry for old system is not supported in the new API. Please use the new task ID format." - }), 400 + return jsonify( + { + "status": "error", + "message": "Retry for old system is not supported in the new API. Please use the new task ID format.", + } + ), 400 except Exception as e: abort(500, f"An error occurred: {e}") -@prgs_bp.route('/cancel/', methods=['POST']) +@prgs_bp.route("/cancel/", methods=["POST"]) def cancel_task_endpoint(task_id): """ Cancel a running or queued task. - + Args: task_id: The ID of the task to cancel """ try: # First check if this is a task ID in the new system task_info = get_task_info(task_id) - + if task_info: # This is a task ID in the new system result = cancel_task(task_id) return jsonify(result) - + # If not found in new system, we need to handle the old system cancellation # For now, return an error as we're transitioning to the new system - return jsonify({ - "status": "error", - "message": "Cancellation for old system is not supported in the new API. Please use the new task ID format." - }), 400 + return jsonify( + { + "status": "error", + "message": "Cancellation for old system is not supported in the new API. Please use the new task ID format.", + } + ), 400 except Exception as e: abort(500, f"An error occurred: {e}") diff --git a/routes/search.py b/routes/search.py index 1d31f38..602a2c9 100755 --- a/routes/search.py +++ b/routes/search.py @@ -1,66 +1,67 @@ from flask import Blueprint, jsonify, request -import logging from routes.utils.search import search # Corrected import from routes.config import get_config # Import get_config function -search_bp = Blueprint('search', __name__) +search_bp = Blueprint("search", __name__) -@search_bp.route('/search', methods=['GET']) + +@search_bp.route("/search", methods=["GET"]) def handle_search(): try: # Get query parameters - query = request.args.get('q', '') - search_type = request.args.get('search_type', '') - limit = int(request.args.get('limit', 10)) - main = request.args.get('main', '') # Get the main parameter for account selection + query = request.args.get("q", "") + search_type = request.args.get("search_type", "") + limit = int(request.args.get("limit", 10)) + main = request.args.get( + "main", "" + ) # Get the main parameter for account selection # If main parameter is not provided in the request, get it from config if not main: config = get_config() - if config and 'spotify' in config: - main = config['spotify'] + if config and "spotify" in config: + main = config["spotify"] print(f"Using main from config: {main}") - # Validate parameters if not query: - return jsonify({'error': 'Missing search query'}), 400 + return jsonify({"error": "Missing search query"}), 400 - valid_types = ['track', 'album', 'artist', 'playlist', 'episode'] + valid_types = ["track", "album", "artist", "playlist", "episode"] if search_type not in valid_types: - return jsonify({'error': 'Invalid search type'}), 400 + return jsonify({"error": "Invalid search type"}), 400 # Perform the search with corrected parameter name raw_results = search( query=query, search_type=search_type, # Fixed 
parameter name
             limit=limit,
-            main=main # Pass the main parameter
+            main=main,  # Pass the main parameter
         )
-
         # Extract items from the appropriate section of the response based on search_type
         items = []
-        if raw_results and search_type + 's' in raw_results:
-            type_key = search_type + 's'
-            items = raw_results[type_key].get('items', [])
+        if raw_results and search_type + "s" in raw_results:
+            type_key = search_type + "s"
+            items = raw_results[type_key].get("items", [])
         elif raw_results and search_type in raw_results:
+            items = raw_results[search_type].get("items", [])
-            items = raw_results[search_type].get('items', [])
-
-        # Return both the items array and the full data for debugging
-        return jsonify({
-            'items': items,
-            'data': raw_results, # Include full data for debugging
-            'error': None
-        })
+        return jsonify(
+            {
+                "items": items,
+                "data": raw_results,  # Include full data for debugging
+                "error": None,
+            }
+        )

     except ValueError as e:
         print(f"ValueError in search: {str(e)}")
-        return jsonify({'error': str(e)}), 400
+        return jsonify({"error": str(e)}), 400
     except Exception as e:
         import traceback
+
         print(f"Exception in search: {str(e)}")
         print(traceback.format_exc())
-        return jsonify({'error': f'Internal server error: {str(e)}'}), 500
\ No newline at end of file
+        return jsonify({"error": f"Internal server error: {str(e)}"}), 500
diff --git a/routes/track.py b/routes/track.py
index f1902b9..01406c9 100755
--- a/routes/track.py
+++ b/routes/track.py
@@ -1,156 +1,179 @@
 from flask import Blueprint, Response, request
-import os
 import json
 import traceback
-import uuid # For generating error task IDs
-import time # For timestamps
+import uuid  # For generating error task IDs
+import time  # For timestamps
 from routes.utils.celery_queue_manager import download_queue_manager
-from routes.utils.celery_tasks import store_task_info, store_task_status, ProgressState # For error task creation
+from routes.utils.celery_tasks import (
+    store_task_info,
+    store_task_status,
+    ProgressState,
+)  # For error task creation
 from urllib.parse import urlparse  # for URL validation
-from routes.utils.get_info import get_spotify_info # Added import
+from routes.utils.get_info import get_spotify_info  # Added import

-track_bp = Blueprint('track', __name__)
+track_bp = Blueprint("track", __name__)

-@track_bp.route('/download/<track_id>', methods=['GET'])
+
+@track_bp.route("/download/<track_id>", methods=["GET"])
 def handle_download(track_id):
     # Retrieve essential parameters from the request.
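The result extraction in handle_search above hinges on Spotify's pluralized response keys; a condensed sketch of that lookup (the function name is hypothetical):

def extract_items(raw_results: dict, search_type: str) -> list:
    # Spotify nests results under a pluralized key ("track" -> "tracks");
    # fall back to the bare key if only that is present.
    if not raw_results:
        return []
    section = raw_results.get(search_type + "s") or raw_results.get(search_type) or {}
    return section.get("items", [])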
# name = request.args.get('name') # Removed # artist = request.args.get('artist') # Removed orig_params = request.args.to_dict() - + # Construct the URL from track_id url = f"https://open.spotify.com/track/{track_id}" - orig_params["original_url"] = url # Update original_url to the constructed one + orig_params["original_url"] = url # Update original_url to the constructed one # Fetch metadata from Spotify try: track_info = get_spotify_info(track_id, "track") - if not track_info or not track_info.get('name') or not track_info.get('artists'): + if ( + not track_info + or not track_info.get("name") + or not track_info.get("artists") + ): return Response( - json.dumps({"error": f"Could not retrieve metadata for track ID: {track_id}"}), + json.dumps( + {"error": f"Could not retrieve metadata for track ID: {track_id}"} + ), status=404, - mimetype='application/json' + mimetype="application/json", ) - - name_from_spotify = track_info.get('name') - artist_from_spotify = track_info['artists'][0].get('name') if track_info['artists'] else "Unknown Artist" + + name_from_spotify = track_info.get("name") + artist_from_spotify = ( + track_info["artists"][0].get("name") + if track_info["artists"] + else "Unknown Artist" + ) except Exception as e: return Response( - json.dumps({"error": f"Failed to fetch metadata for track {track_id}: {str(e)}"}), + json.dumps( + {"error": f"Failed to fetch metadata for track {track_id}: {str(e)}"} + ), status=500, - mimetype='application/json' + mimetype="application/json", ) - + # Validate required parameters if not url: return Response( - json.dumps({"error": "Missing required parameter: url", "original_url": url}), + json.dumps( + {"error": "Missing required parameter: url", "original_url": url} + ), status=400, - mimetype='application/json' + mimetype="application/json", ) # Validate URL domain parsed = urlparse(url) host = parsed.netloc.lower() - if not (host.endswith('deezer.com') or host.endswith('open.spotify.com') or host.endswith('spotify.com')): + if not ( + host.endswith("deezer.com") + or host.endswith("open.spotify.com") + or host.endswith("spotify.com") + ): return Response( json.dumps({"error": f"Invalid Link {url} :(", "original_url": url}), status=400, - mimetype='application/json' + mimetype="application/json", ) - + try: - task_id = download_queue_manager.add_task({ - "download_type": "track", - "url": url, - "name": name_from_spotify, # Use fetched name - "artist": artist_from_spotify, # Use fetched artist - "orig_request": orig_params - }) + task_id = download_queue_manager.add_task( + { + "download_type": "track", + "url": url, + "name": name_from_spotify, # Use fetched name + "artist": artist_from_spotify, # Use fetched artist + "orig_request": orig_params, + } + ) # Removed DuplicateDownloadError handling, add_task now manages this by creating an error task. 
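The URL screening in this handler can be factored out; a sketch mirroring the endswith() checks used in this patch (the constant and function names are hypothetical):

from urllib.parse import urlparse

ALLOWED_HOST_SUFFIXES = ("deezer.com", "open.spotify.com", "spotify.com")


def is_supported_link(url: str) -> bool:
    # Mirrors the endswith() checks above; note a bare suffix match also
    # admits subdomains such as www.deezer.com.
    host = urlparse(url).netloc.lower()
    return any(host.endswith(suffix) for suffix in ALLOWED_HOST_SUFFIXES)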
except Exception as e: # Generic error handling for other issues during task submission error_task_id = str(uuid.uuid4()) - store_task_info(error_task_id, { - "download_type": "track", - "url": url, - "name": name_from_spotify, # Use fetched name - "artist": artist_from_spotify, # Use fetched artist - "original_request": orig_params, - "created_at": time.time(), - "is_submission_error_task": True - }) - store_task_status(error_task_id, { - "status": ProgressState.ERROR, - "error": f"Failed to queue track download: {str(e)}", - "timestamp": time.time() - }) - return Response( - json.dumps({"error": f"Failed to queue track download: {str(e)}", "task_id": error_task_id}), - status=500, - mimetype='application/json' + store_task_info( + error_task_id, + { + "download_type": "track", + "url": url, + "name": name_from_spotify, # Use fetched name + "artist": artist_from_spotify, # Use fetched artist + "original_request": orig_params, + "created_at": time.time(), + "is_submission_error_task": True, + }, ) - + store_task_status( + error_task_id, + { + "status": ProgressState.ERROR, + "error": f"Failed to queue track download: {str(e)}", + "timestamp": time.time(), + }, + ) + return Response( + json.dumps( + { + "error": f"Failed to queue track download: {str(e)}", + "task_id": error_task_id, + } + ), + status=500, + mimetype="application/json", + ) + return Response( - json.dumps({"prg_file": task_id}), # prg_file is the old name for task_id + json.dumps({"prg_file": task_id}), # prg_file is the old name for task_id status=202, - mimetype='application/json' + mimetype="application/json", ) -@track_bp.route('/download/cancel', methods=['GET']) + +@track_bp.route("/download/cancel", methods=["GET"]) def cancel_download(): """ Cancel a running track download process by its process id (prg file name). """ - prg_file = request.args.get('prg_file') + prg_file = request.args.get("prg_file") if not prg_file: return Response( json.dumps({"error": "Missing process id (prg_file) parameter"}), status=400, - mimetype='application/json' + mimetype="application/json", ) - + # Use the queue manager's cancellation method. result = download_queue_manager.cancel_task(prg_file) status_code = 200 if result.get("status") == "cancelled" else 404 - - return Response( - json.dumps(result), - status=status_code, - mimetype='application/json' - ) -@track_bp.route('/info', methods=['GET']) + return Response(json.dumps(result), status=status_code, mimetype="application/json") + + +@track_bp.route("/info", methods=["GET"]) def get_track_info(): """ Retrieve Spotify track metadata given a Spotify track ID. Expects a query parameter 'id' that contains the Spotify track ID. """ - spotify_id = request.args.get('id') - + spotify_id = request.args.get("id") + if not spotify_id: return Response( json.dumps({"error": "Missing parameter: id"}), status=400, - mimetype='application/json' + mimetype="application/json", ) - + try: # Import and use the get_spotify_info function from the utility module. 
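For illustration, a hypothetical client round-trip against these endpoints using the third-party requests package; the /api/{type}/download/{id} and /api/prgs prefixes appear elsewhere in this patch, but the host, port, and track ID are assumptions:

import requests

BASE = "http://localhost:7171/api"  # assumed host and port

# Queue a track download; the 202 response carries the task id under the
# legacy key "prg_file", as noted above.
task_id = requests.get(f"{BASE}/track/download/SOME_TRACK_ID").json()["prg_file"]

# Poll progress via the prgs blueprint, then cancel if needed.
status = requests.get(f"{BASE}/prgs/{task_id}").json()
requests.get(f"{BASE}/track/download/cancel", params={"prg_file": task_id})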
from routes.utils.get_info import get_spotify_info + track_info = get_spotify_info(spotify_id, "track") - return Response( - json.dumps(track_info), - status=200, - mimetype='application/json' - ) + return Response(json.dumps(track_info), status=200, mimetype="application/json") except Exception as e: - error_data = { - "error": str(e), - "traceback": traceback.format_exc() - } - return Response( - json.dumps(error_data), - status=500, - mimetype='application/json' - ) + error_data = {"error": str(e), "traceback": traceback.format_exc()} + return Response(json.dumps(error_data), status=500, mimetype="application/json") diff --git a/routes/utils/album.py b/routes/utils/album.py index 3406c7c..63e1cce 100755 --- a/routes/utils/album.py +++ b/routes/utils/album.py @@ -1,11 +1,12 @@ -import os -import json import traceback from deezspot.spotloader import SpoLogin from deezspot.deezloader import DeeLogin -from pathlib import Path -from routes.utils.credentials import get_credential, _get_global_spotify_api_creds, get_spotify_blob_path -from routes.utils.celery_config import get_config_params +from routes.utils.credentials import ( + get_credential, + _get_global_spotify_api_creds, + get_spotify_blob_path, +) + def download_album( url, @@ -23,56 +24,68 @@ def download_album( max_retries=3, progress_callback=None, convert_to=None, - bitrate=None + bitrate=None, ): try: # Detect URL source (Spotify or Deezer) from URL - is_spotify_url = 'open.spotify.com' in url.lower() - is_deezer_url = 'deezer.com' in url.lower() - - service = '' + is_spotify_url = "open.spotify.com" in url.lower() + is_deezer_url = "deezer.com" in url.lower() + + service = "" if is_spotify_url: - service = 'spotify' + service = "spotify" elif is_deezer_url: - service = 'deezer' + service = "deezer" else: error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com" print(f"ERROR: {error_msg}") raise ValueError(error_msg) - + print(f"DEBUG: album.py - Service determined from URL: {service}") - print(f"DEBUG: album.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'") + print( + f"DEBUG: album.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'" + ) # Get global Spotify API credentials - global_spotify_client_id, global_spotify_client_secret = _get_global_spotify_api_creds() + global_spotify_client_id, global_spotify_client_secret = ( + _get_global_spotify_api_creds() + ) if not global_spotify_client_id or not global_spotify_client_secret: warning_msg = "WARN: album.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail." print(warning_msg) - if service == 'spotify': - if fallback: # Fallback is a Deezer account name for a Spotify URL - if quality is None: quality = 'FLAC' # Deezer quality for first attempt - if fall_quality is None: fall_quality = 'HIGH' # Spotify quality for fallback (if Deezer fails) - + if service == "spotify": + if fallback: # Fallback is a Deezer account name for a Spotify URL + if quality is None: + quality = "FLAC" # Deezer quality for first attempt + if fall_quality is None: + fall_quality = ( + "HIGH" # Spotify quality for fallback (if Deezer fails) + ) + deezer_error = None try: # Attempt 1: Deezer via download_albumspo (using 'fallback' as Deezer account name) - print(f"DEBUG: album.py - Spotify URL. 
Attempt 1: Deezer (account: {fallback})") - deezer_fallback_creds = get_credential('deezer', fallback) - arl = deezer_fallback_creds.get('arl') + print( + f"DEBUG: album.py - Spotify URL. Attempt 1: Deezer (account: {fallback})" + ) + deezer_fallback_creds = get_credential("deezer", fallback) + arl = deezer_fallback_creds.get("arl") if not arl: - raise ValueError(f"ARL not found for Deezer account '{fallback}'.") - + raise ValueError( + f"ARL not found for Deezer account '{fallback}'." + ) + dl = DeeLogin( arl=arl, spotify_client_id=global_spotify_client_id, spotify_client_secret=global_spotify_client_secret, - progress_callback=progress_callback + progress_callback=progress_callback, ) dl.download_albumspo( - link_album=url, # Spotify URL + link_album=url, # Spotify URL output_dir="./downloads", - quality_download=quality, # Deezer quality + quality_download=quality, # Deezer quality recursive_quality=True, recursive_download=False, not_interface=False, @@ -85,34 +98,49 @@ def download_album( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, + ) + print( + f"DEBUG: album.py - Album download via Deezer (account: {fallback}) successful for Spotify URL." ) - print(f"DEBUG: album.py - Album download via Deezer (account: {fallback}) successful for Spotify URL.") except Exception as e: deezer_error = e - print(f"ERROR: album.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}") + print( + f"ERROR: album.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}" + ) traceback.print_exc() - print(f"DEBUG: album.py - Attempting Spotify direct download (account: {main} for blob)...") - + print( + f"DEBUG: album.py - Attempting Spotify direct download (account: {main} for blob)..." + ) + # Attempt 2: Spotify direct via download_album (using 'main' as Spotify account for blob) try: - if not global_spotify_client_id or not global_spotify_client_secret: - raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.") + if ( + not global_spotify_client_id + or not global_spotify_client_secret + ): + raise ValueError( + "Global Spotify API credentials (client_id/secret) not configured for Spotify download." + ) blob_file_path = get_spotify_blob_path(main) if not blob_file_path or not blob_file_path.exists(): - raise FileNotFoundError(f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}") + raise FileNotFoundError( + f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}" + ) spo = SpoLogin( - credentials_path=str(blob_file_path), # Ensure it's a string + credentials_path=str( + blob_file_path + ), # Ensure it's a string spotify_client_id=global_spotify_client_id, spotify_client_secret=global_spotify_client_secret, - progress_callback=progress_callback + progress_callback=progress_callback, ) spo.download_album( - link_album=url, # Spotify URL + link_album=url, # Spotify URL output_dir="./downloads", - quality_download=fall_quality, # Spotify quality + quality_download=fall_quality, # Spotify quality recursive_quality=True, recursive_download=False, not_interface=False, @@ -126,36 +154,47 @@ def download_album( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, + ) + print( + f"DEBUG: album.py - Spotify direct download (account: {main} for blob) successful." 
) - print(f"DEBUG: album.py - Spotify direct download (account: {main} for blob) successful.") except Exception as e2: - print(f"ERROR: album.py - Spotify direct download (account: {main} for blob) also failed: {e2}") + print( + f"ERROR: album.py - Spotify direct download (account: {main} for blob) also failed: {e2}" + ) raise RuntimeError( f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. " f"Deezer error: {deezer_error}, Spotify error: {e2}" ) from e2 else: # Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob) - if quality is None: quality = 'HIGH' # Default Spotify quality - print(f"DEBUG: album.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}") + if quality is None: + quality = "HIGH" # Default Spotify quality + print( + f"DEBUG: album.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}" + ) if not global_spotify_client_id or not global_spotify_client_secret: - raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.") + raise ValueError( + "Global Spotify API credentials (client_id/secret) not configured for Spotify download." + ) blob_file_path = get_spotify_blob_path(main) if not blob_file_path or not blob_file_path.exists(): - raise FileNotFoundError(f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}") + raise FileNotFoundError( + f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}" + ) spo = SpoLogin( - credentials_path=str(blob_file_path), # Ensure it's a string + credentials_path=str(blob_file_path), # Ensure it's a string spotify_client_id=global_spotify_client_id, spotify_client_secret=global_spotify_client_secret, - progress_callback=progress_callback + progress_callback=progress_callback, ) spo.download_album( link_album=url, output_dir="./downloads", - quality_download=quality, + quality_download=quality, recursive_quality=True, recursive_download=False, not_interface=False, @@ -169,26 +208,31 @@ def download_album( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, ) - print(f"DEBUG: album.py - Direct Spotify download (account: {main} for blob) successful.") - - elif service == 'deezer': + print( + f"DEBUG: album.py - Direct Spotify download (account: {main} for blob) successful." + ) + + elif service == "deezer": # Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL) - if quality is None: quality = 'FLAC' # Default Deezer quality - print(f"DEBUG: album.py - Deezer URL. Direct download with Deezer account: {main}") - deezer_main_creds = get_credential('deezer', main) # For ARL - arl = deezer_main_creds.get('arl') + if quality is None: + quality = "FLAC" # Default Deezer quality + print( + f"DEBUG: album.py - Deezer URL. 
Direct download with Deezer account: {main}" + ) + deezer_main_creds = get_credential("deezer", main) # For ARL + arl = deezer_main_creds.get("arl") if not arl: raise ValueError(f"ARL not found for Deezer account '{main}'.") dl = DeeLogin( - arl=arl, # Account specific ARL - spotify_client_id=global_spotify_client_id, # Global Spotify keys - spotify_client_secret=global_spotify_client_secret, # Global Spotify keys - progress_callback=progress_callback + arl=arl, # Account specific ARL + spotify_client_id=global_spotify_client_id, # Global Spotify keys + spotify_client_secret=global_spotify_client_secret, # Global Spotify keys + progress_callback=progress_callback, ) - dl.download_albumdee( # Deezer URL, download via Deezer + dl.download_albumdee( # Deezer URL, download via Deezer link_album=url, output_dir="./downloads", quality_download=quality, @@ -203,9 +247,11 @@ def download_album( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, + ) + print( + f"DEBUG: album.py - Direct Deezer download (account: {main}) successful." ) - print(f"DEBUG: album.py - Direct Deezer download (account: {main}) successful.") else: # Should be caught by initial service check, but as a safeguard raise ValueError(f"Unsupported service determined: {service}") diff --git a/routes/utils/artist.py b/routes/utils/artist.py index 3bbecb0..88d706e 100644 --- a/routes/utils/artist.py +++ b/routes/utils/artist.py @@ -1,10 +1,7 @@ import json -import traceback -from pathlib import Path -import os import logging -from flask import Blueprint, Response, request, url_for -from routes.utils.celery_queue_manager import download_queue_manager, get_config_params +from flask import url_for +from routes.utils.celery_queue_manager import download_queue_manager from routes.utils.get_info import get_spotify_info from routes.utils.credentials import get_credential, _get_global_spotify_api_creds from routes.utils.celery_tasks import get_last_task_status, ProgressState @@ -15,12 +12,18 @@ from deezspot.libutils.utils import get_ids, link_is_valid # Configure logging logger = logging.getLogger(__name__) + def log_json(message_dict): """Helper function to output a JSON-formatted log message.""" print(json.dumps(message_dict)) -def get_artist_discography(url, main_spotify_account_name, album_type='album,single,compilation,appears_on', progress_callback=None): +def get_artist_discography( + url, + main_spotify_account_name, + album_type="album,single,compilation,appears_on", + progress_callback=None, +): """ Validate the URL, extract the artist ID, and retrieve the discography. Uses global Spotify API client_id/secret for Spo initialization. @@ -34,28 +37,41 @@ def get_artist_discography(url, main_spotify_account_name, album_type='album,sin log_json({"status": "error", "message": "No artist URL provided."}) raise ValueError("No artist URL provided.") - link_is_valid(link=url) # This will raise an exception if the link is invalid. - + link_is_valid(link=url) # This will raise an exception if the link is invalid. 
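Stepping back to download_album above, its Spotify-URL branch boils down to a Deezer-first strategy that chains both errors on double failure; a condensed control-flow sketch in which the download_via_* helpers are hypothetical stand-ins for the DeeLogin/SpoLogin calls shown in the diff:

def download_with_fallback(url, main, fallback):
    deezer_error = None
    if fallback:
        try:
            # Attempt 1: Deezer, using the fallback account's ARL.
            return download_via_deezer(url, account=fallback)
        except Exception as e:
            deezer_error = e
    try:
        # Attempt 2 (or the only attempt): direct Spotify via the main blob.
        return download_via_spotify(url, account=main)
    except Exception as e2:
        if deezer_error is not None:
            raise RuntimeError(
                f"Both Deezer (account: {fallback}) and Spotify (account: {main}) failed. "
                f"Deezer error: {deezer_error}, Spotify error: {e2}"
            ) from e2
        raise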
+ client_id, client_secret = _get_global_spotify_api_creds() - + if not client_id or not client_secret: - log_json({"status": "error", "message": "Global Spotify API client_id or client_secret not configured."}) + log_json( + { + "status": "error", + "message": "Global Spotify API client_id or client_secret not configured.", + } + ) raise ValueError("Global Spotify API credentials are not configured.") if not main_spotify_account_name: # This is a warning now, as API keys are global. - logger.warning("main_spotify_account_name not provided for get_artist_discography context. Using global API keys.") + logger.warning( + "main_spotify_account_name not provided for get_artist_discography context. Using global API keys." + ) else: # Check if account exists for context, good for consistency try: - get_credential('spotify', main_spotify_account_name) - logger.debug(f"Spotify account context '{main_spotify_account_name}' exists for get_artist_discography.") + get_credential("spotify", main_spotify_account_name) + logger.debug( + f"Spotify account context '{main_spotify_account_name}' exists for get_artist_discography." + ) except FileNotFoundError: - logger.warning(f"Spotify account '{main_spotify_account_name}' provided for discography context not found.") + logger.warning( + f"Spotify account '{main_spotify_account_name}' provided for discography context not found." + ) except Exception as e: - logger.warning(f"Error checking Spotify account '{main_spotify_account_name}' for discography context: {e}") + logger.warning( + f"Error checking Spotify account '{main_spotify_account_name}' for discography context: {e}" + ) - Spo.__init__(client_id, client_secret) # Initialize with global API keys + Spo.__init__(client_id, client_secret) # Initialize with global API keys try: artist_id = get_ids(url) @@ -78,94 +94,108 @@ def get_artist_discography(url, main_spotify_account_name, album_type='album,sin raise -def download_artist_albums(url, album_type="album,single,compilation", request_args=None): +def download_artist_albums( + url, album_type="album,single,compilation", request_args=None +): """ Download albums by an artist, filtered by album types. - + Args: url (str): Spotify artist URL album_type (str): Comma-separated list of album types to download (album, single, compilation, appears_on) request_args (dict): Original request arguments for tracking - + Returns: tuple: (list of successfully queued albums, list of duplicate albums) """ if not url: raise ValueError("Missing required parameter: url") - + # Extract artist ID from URL - artist_id = url.split('/')[-1] - if '?' in artist_id: - artist_id = artist_id.split('?')[0] - + artist_id = url.split("/")[-1] + if "?" 
in artist_id: + artist_id = artist_id.split("?")[0] + logger.info(f"Fetching artist info for ID: {artist_id}") - + # Detect URL source (only Spotify is supported for artists) - is_spotify_url = 'open.spotify.com' in url.lower() - is_deezer_url = 'deezer.com' in url.lower() - + is_spotify_url = "open.spotify.com" in url.lower() + # Artist functionality only works with Spotify URLs currently if not is_spotify_url: - error_msg = "Invalid URL: Artist functionality only supports open.spotify.com URLs" + error_msg = ( + "Invalid URL: Artist functionality only supports open.spotify.com URLs" + ) logger.error(error_msg) raise ValueError(error_msg) - + # Get artist info with albums artist_data = get_spotify_info(artist_id, "artist_discography") - + # Debug logging to inspect the structure of artist_data - logger.debug(f"Artist data structure has keys: {list(artist_data.keys() if isinstance(artist_data, dict) else [])}") - - if not artist_data or 'items' not in artist_data: - raise ValueError(f"Failed to retrieve artist data or no albums found for artist ID {artist_id}") - + logger.debug( + f"Artist data structure has keys: {list(artist_data.keys() if isinstance(artist_data, dict) else [])}" + ) + + if not artist_data or "items" not in artist_data: + raise ValueError( + f"Failed to retrieve artist data or no albums found for artist ID {artist_id}" + ) + # Parse the album types to filter by allowed_types = [t.strip().lower() for t in album_type.split(",")] logger.info(f"Filtering albums by types: {allowed_types}") - - # Get artist name from the first album - artist_name = "" - if artist_data.get('items') and len(artist_data['items']) > 0: - first_album = artist_data['items'][0] - if first_album.get('artists') and len(first_album['artists']) > 0: - artist_name = first_album['artists'][0].get('name', '') - + # Filter albums by the specified types filtered_albums = [] - for album in artist_data.get('items', []): - album_type_value = album.get('album_type', '').lower() - album_group_value = album.get('album_group', '').lower() - + for album in artist_data.get("items", []): + album_type_value = album.get("album_type", "").lower() + album_group_value = album.get("album_group", "").lower() + # Apply filtering logic based on album_type and album_group - if (('album' in allowed_types and album_type_value == 'album' and album_group_value == 'album') or - ('single' in allowed_types and album_type_value == 'single' and album_group_value == 'single') or - ('compilation' in allowed_types and album_type_value == 'compilation') or - ('appears_on' in allowed_types and album_group_value == 'appears_on')): + if ( + ( + "album" in allowed_types + and album_type_value == "album" + and album_group_value == "album" + ) + or ( + "single" in allowed_types + and album_type_value == "single" + and album_group_value == "single" + ) + or ("compilation" in allowed_types and album_type_value == "compilation") + or ("appears_on" in allowed_types and album_group_value == "appears_on") + ): filtered_albums.append(album) - + if not filtered_albums: logger.warning(f"No albums match the specified types: {album_type}") return [], [] - + # Queue each album as a separate download task album_task_ids = [] successfully_queued_albums = [] - duplicate_albums = [] # To store info about albums that were duplicates - + duplicate_albums = [] # To store info about albums that were duplicates + for album in filtered_albums: # Add detailed logging to inspect each album's structure and URLs logger.debug(f"Processing album: {album.get('name', 
'Unknown')}") logger.debug(f"Album structure has keys: {list(album.keys())}") - - external_urls = album.get('external_urls', {}) + + external_urls = album.get("external_urls", {}) logger.debug(f"Album external_urls: {external_urls}") - - album_url = external_urls.get('spotify', '') - album_name = album.get('name', 'Unknown Album') - album_artists = album.get('artists', []) - album_artist = album_artists[0].get('name', 'Unknown Artist') if album_artists else 'Unknown Artist' - album_id = album.get('id') + + album_url = external_urls.get("spotify", "") + album_name = album.get("name", "Unknown Album") + album_artists = album.get("artists", []) + album_artist = ( + album_artists[0].get("name", "Unknown Artist") + if album_artists + else "Unknown Artist" + ) + album_id = album.get("id") logger.debug(f"Extracted album URL: {album_url}") logger.debug(f"Extracted album ID: {album_id}") @@ -173,7 +203,7 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a if not album_url or not album_id: logger.warning(f"Skipping album without URL or ID: {album_name}") continue - + # Create album-specific request args instead of using original artist request album_request_args = { "url": album_url, @@ -182,12 +212,14 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a "type": "album", # URL source will be automatically detected in the download functions "parent_artist_url": url, - "parent_request_type": "artist" + "parent_request_type": "artist", } - + # Include original download URL for this album task - album_request_args["original_url"] = url_for('album.handle_download', album_id=album_id, _external=True) - + album_request_args["original_url"] = url_for( + "album.handle_download", album_id=album_id, _external=True + ) + # Create task for this album task_data = { "download_type": "album", @@ -196,44 +228,64 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a "retry_url": album_url, # Use album URL for retry logic, not artist URL "name": album_name, "artist": album_artist, - "orig_request": album_request_args # Store album-specific request params + "orig_request": album_request_args, # Store album-specific request params } - + # Debug log the task data being sent to the queue - logger.debug(f"Album task data: url={task_data['url']}, retry_url={task_data['retry_url']}") - + logger.debug( + f"Album task data: url={task_data['url']}, retry_url={task_data['retry_url']}" + ) + try: task_id = download_queue_manager.add_task(task_data) - + # Check the status of the newly added task to see if it was marked as a duplicate error last_status = get_last_task_status(task_id) - - if last_status and last_status.get("status") == ProgressState.ERROR and last_status.get("existing_task_id"): - logger.warning(f"Album {album_name} (URL: {album_url}) is a duplicate. Error task ID: {task_id}. Existing task ID: {last_status.get('existing_task_id')}") - duplicate_albums.append({ - "name": album_name, - "artist": album_artist, - "url": album_url, - "error_task_id": task_id, # This is the ID of the task marked as a duplicate error - "existing_task_id": last_status.get("existing_task_id"), - "message": last_status.get("message", "Duplicate download attempt.") - }) + + if ( + last_status + and last_status.get("status") == ProgressState.ERROR + and last_status.get("existing_task_id") + ): + logger.warning( + f"Album {album_name} (URL: {album_url}) is a duplicate. Error task ID: {task_id}. 
Existing task ID: {last_status.get('existing_task_id')}" + ) + duplicate_albums.append( + { + "name": album_name, + "artist": album_artist, + "url": album_url, + "error_task_id": task_id, # This is the ID of the task marked as a duplicate error + "existing_task_id": last_status.get("existing_task_id"), + "message": last_status.get( + "message", "Duplicate download attempt." + ), + } + ) else: # If not a duplicate error, it was successfully queued (or failed for other reasons handled by add_task) # We only add to successfully_queued_albums if it wasn't a duplicate error from add_task # Other errors from add_task (like submission failure) would also result in an error status for task_id # but won't have 'existing_task_id'. The client can check the status of this task_id. - album_task_ids.append(task_id) # Keep track of all task_ids returned by add_task - successfully_queued_albums.append({ - "name": album_name, - "artist": album_artist, - "url": album_url, - "task_id": task_id - }) + album_task_ids.append( + task_id + ) # Keep track of all task_ids returned by add_task + successfully_queued_albums.append( + { + "name": album_name, + "artist": album_artist, + "url": album_url, + "task_id": task_id, + } + ) logger.info(f"Queued album download: {album_name} ({task_id})") - except Exception as e: # Catch any other unexpected error from add_task itself (though it should be rare now) - logger.error(f"Failed to queue album {album_name} due to an unexpected error in add_task: {str(e)}") + except Exception as e: # Catch any other unexpected error from add_task itself (though it should be rare now) + logger.error( + f"Failed to queue album {album_name} due to an unexpected error in add_task: {str(e)}" + ) # Optionally, collect these errors. For now, just logging and continuing. - - logger.info(f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found.") + + logger.info( + f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found." 
+ ) return successfully_queued_albums, duplicate_albums diff --git a/routes/utils/celery_config.py b/routes/utils/celery_config.py index f8e5704..70ace42 100644 --- a/routes/utils/celery_config.py +++ b/routes/utils/celery_config.py @@ -7,49 +7,52 @@ from pathlib import Path logger = logging.getLogger(__name__) # Redis configuration - read from environment variables -REDIS_HOST = os.getenv('REDIS_HOST', 'localhost') -REDIS_PORT = os.getenv('REDIS_PORT', '6379') -REDIS_DB = os.getenv('REDIS_DB', '0') +REDIS_HOST = os.getenv("REDIS_HOST", "localhost") +REDIS_PORT = os.getenv("REDIS_PORT", "6379") +REDIS_DB = os.getenv("REDIS_DB", "0") # Optional Redis password -REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '') +REDIS_PASSWORD = os.getenv("REDIS_PASSWORD", "") # Build default URL with password if provided _password_part = f":{REDIS_PASSWORD}@" if REDIS_PASSWORD else "" default_redis_url = f"redis://{_password_part}{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}" -REDIS_URL = os.getenv('REDIS_URL', default_redis_url) -REDIS_BACKEND = os.getenv('REDIS_BACKEND', REDIS_URL) +REDIS_URL = os.getenv("REDIS_URL", default_redis_url) +REDIS_BACKEND = os.getenv("REDIS_BACKEND", REDIS_URL) # Log Redis connection details -logger.info(f"Redis configuration: REDIS_URL={REDIS_URL}, REDIS_BACKEND={REDIS_BACKEND}") +logger.info( + f"Redis configuration: REDIS_URL={REDIS_URL}, REDIS_BACKEND={REDIS_BACKEND}" +) # Config path -CONFIG_FILE_PATH = Path('./data/config/main.json') +CONFIG_FILE_PATH = Path("./data/config/main.json") DEFAULT_MAIN_CONFIG = { - 'service': 'spotify', - 'spotify': '', - 'deezer': '', - 'fallback': False, - 'spotifyQuality': 'NORMAL', - 'deezerQuality': 'MP3_128', - 'realTime': False, - 'customDirFormat': '%ar_album%/%album%', - 'customTrackFormat': '%tracknum%. %music%', - 'tracknum_padding': True, - 'save_cover': True, - 'maxConcurrentDownloads': 3, - 'maxRetries': 3, - 'retryDelaySeconds': 5, - 'retry_delay_increase': 5, - 'convertTo': None, - 'bitrate': None + "service": "spotify", + "spotify": "", + "deezer": "", + "fallback": False, + "spotifyQuality": "NORMAL", + "deezerQuality": "MP3_128", + "realTime": False, + "customDirFormat": "%ar_album%/%album%", + "customTrackFormat": "%tracknum%. %music%", + "tracknum_padding": True, + "save_cover": True, + "maxConcurrentDownloads": 3, + "maxRetries": 3, + "retryDelaySeconds": 5, + "retry_delay_increase": 5, + "convertTo": None, + "bitrate": None, } + def get_config_params(): """ Get configuration parameters from the config file. Creates the file with defaults if it doesn't exist. Ensures all default keys are present in the loaded config. - + Returns: dict: A dictionary containing configuration parameters """ @@ -59,63 +62,69 @@ def get_config_params(): if not CONFIG_FILE_PATH.exists(): logger.info(f"{CONFIG_FILE_PATH} not found. Creating with default values.") - with open(CONFIG_FILE_PATH, 'w') as f: + with open(CONFIG_FILE_PATH, "w") as f: json.dump(DEFAULT_MAIN_CONFIG, f, indent=4) - return DEFAULT_MAIN_CONFIG.copy() # Return a copy of defaults - - with open(CONFIG_FILE_PATH, 'r') as f: + return DEFAULT_MAIN_CONFIG.copy() # Return a copy of defaults + + with open(CONFIG_FILE_PATH, "r") as f: config = json.load(f) - + # Ensure all default keys are present in the loaded config updated = False for key, value in DEFAULT_MAIN_CONFIG.items(): if key not in config: config[key] = value updated = True - + if updated: - logger.info(f"Configuration at {CONFIG_FILE_PATH} was missing some default keys. 
Updated with defaults.") - with open(CONFIG_FILE_PATH, 'w') as f: + logger.info( + f"Configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults." + ) + with open(CONFIG_FILE_PATH, "w") as f: json.dump(config, f, indent=4) - + return config except Exception as e: - logger.error(f"Error reading or creating config at {CONFIG_FILE_PATH}: {e}", exc_info=True) + logger.error( + f"Error reading or creating config at {CONFIG_FILE_PATH}: {e}", + exc_info=True, + ) # Return defaults if config read/create fails return DEFAULT_MAIN_CONFIG.copy() + # Load configuration values we need for Celery -config_params_values = get_config_params() # Renamed to avoid conflict with module name -MAX_CONCURRENT_DL = config_params_values.get('maxConcurrentDownloads', 3) -MAX_RETRIES = config_params_values.get('maxRetries', 3) -RETRY_DELAY = config_params_values.get('retryDelaySeconds', 5) -RETRY_DELAY_INCREASE = config_params_values.get('retry_delay_increase', 5) +config_params_values = get_config_params() # Renamed to avoid conflict with module name +MAX_CONCURRENT_DL = config_params_values.get("maxConcurrentDownloads", 3) +MAX_RETRIES = config_params_values.get("maxRetries", 3) +RETRY_DELAY = config_params_values.get("retryDelaySeconds", 5) +RETRY_DELAY_INCREASE = config_params_values.get("retry_delay_increase", 5) # Define task queues task_queues = { - 'default': { - 'exchange': 'default', - 'routing_key': 'default', + "default": { + "exchange": "default", + "routing_key": "default", }, - 'downloads': { - 'exchange': 'downloads', - 'routing_key': 'downloads', + "downloads": { + "exchange": "downloads", + "routing_key": "downloads", + }, + "utility_tasks": { + "exchange": "utility_tasks", + "routing_key": "utility_tasks", }, - 'utility_tasks': { - 'exchange': 'utility_tasks', - 'routing_key': 'utility_tasks', - } } # Set default queue -task_default_queue = 'downloads' -task_default_exchange = 'downloads' -task_default_routing_key = 'downloads' +task_default_queue = "downloads" +task_default_exchange = "downloads" +task_default_routing_key = "downloads" # Celery task settings -task_serializer = 'json' -accept_content = ['json'] -result_serializer = 'json' +task_serializer = "json" +accept_content = ["json"] +result_serializer = "json" enable_utc = True # Configure worker concurrency based on MAX_CONCURRENT_DL @@ -123,15 +132,15 @@ worker_concurrency = MAX_CONCURRENT_DL # Configure task rate limiting - these are per-minute limits task_annotations = { - 'routes.utils.celery_tasks.download_track': { - 'rate_limit': f'{MAX_CONCURRENT_DL}/m', + "routes.utils.celery_tasks.download_track": { + "rate_limit": f"{MAX_CONCURRENT_DL}/m", }, - 'routes.utils.celery_tasks.download_album': { - 'rate_limit': f'{MAX_CONCURRENT_DL}/m', + "routes.utils.celery_tasks.download_album": { + "rate_limit": f"{MAX_CONCURRENT_DL}/m", + }, + "routes.utils.celery_tasks.download_playlist": { + "rate_limit": f"{MAX_CONCURRENT_DL}/m", }, - 'routes.utils.celery_tasks.download_playlist': { - 'rate_limit': f'{MAX_CONCURRENT_DL}/m', - } } # Configure retry settings @@ -144,10 +153,10 @@ result_expires = 60 * 60 * 24 * 7 # 7 days # Configure visibility timeout for task messages broker_transport_options = { - 'visibility_timeout': 3600, # 1 hour - 'fanout_prefix': True, - 'fanout_patterns': True, - 'priority_steps': [0, 3, 6, 9], + "visibility_timeout": 3600, # 1 hour + "fanout_prefix": True, + "fanout_patterns": True, + "priority_steps": [0, 3, 6, 9], } # Important broker connection settings @@ -157,4 +166,4 @@ 
broker_connection_max_retries = 10 broker_pool_limit = 10 worker_prefetch_multiplier = 1 # Process one task at a time per worker worker_max_tasks_per_child = 100 # Restart worker after 100 tasks -worker_disable_rate_limits = False \ No newline at end of file +worker_disable_rate_limits = False diff --git a/routes/utils/celery_manager.py b/routes/utils/celery_manager.py index f32d1aa..f2fcf40 100644 --- a/routes/utils/celery_manager.py +++ b/routes/utils/celery_manager.py @@ -1,26 +1,9 @@ -import os -import json -import signal import subprocess import logging import time -import atexit -from pathlib import Path import threading -import queue -import sys -import uuid # Import Celery task utilities -from .celery_tasks import ( - ProgressState, - get_task_info, - get_last_task_status, - store_task_status, - get_all_tasks as get_all_celery_tasks_info, - cleanup_stale_errors, - delayed_delete_task_data, -) from .celery_config import get_config_params, MAX_CONCURRENT_DL # Configure logging diff --git a/routes/utils/celery_queue_manager.py b/routes/utils/celery_queue_manager.py index dc637fa..548f00e 100644 --- a/routes/utils/celery_queue_manager.py +++ b/routes/utils/celery_queue_manager.py @@ -1,33 +1,29 @@ -import os import json import time import uuid import logging -from datetime import datetime from routes.utils.celery_tasks import ( - celery_app, - download_track, - download_album, + download_track, + download_album, download_playlist, - store_task_status, + store_task_status, store_task_info, get_task_info, - get_task_status, get_last_task_status, cancel_task as cancel_celery_task, retry_task as retry_celery_task, get_all_tasks, - ProgressState + ProgressState, ) # Configure logging logger = logging.getLogger(__name__) # Load configuration -CONFIG_PATH = './data/config/main.json' +CONFIG_PATH = "./data/config/main.json" try: - with open(CONFIG_PATH, 'r') as f: + with open(CONFIG_PATH, "r") as f: config_data = json.load(f) MAX_CONCURRENT_DL = config_data.get("maxConcurrentDownloads", 3) except Exception as e: @@ -35,82 +31,86 @@ except Exception as e: # Fallback default MAX_CONCURRENT_DL = 3 + def get_config_params(): """ Get common download parameters from the config file. This centralizes parameter retrieval and reduces redundancy in API calls. - + Returns: dict: A dictionary containing common parameters from config """ try: - with open(CONFIG_PATH, 'r') as f: + with open(CONFIG_PATH, "r") as f: config = json.load(f) - + return { - 'spotify': config.get('spotify', ''), - 'deezer': config.get('deezer', ''), - 'fallback': config.get('fallback', False), - 'spotifyQuality': config.get('spotifyQuality', 'NORMAL'), - 'deezerQuality': config.get('deezerQuality', 'MP3_128'), - 'realTime': config.get('realTime', False), - 'customDirFormat': config.get('customDirFormat', '%ar_album%/%album%'), - 'customTrackFormat': config.get('customTrackFormat', '%tracknum%. 
%music%'), - 'tracknum_padding': config.get('tracknum_padding', True), - 'save_cover': config.get('save_cover', True), - 'maxRetries': config.get('maxRetries', 3), - 'retryDelaySeconds': config.get('retryDelaySeconds', 5), - 'retry_delay_increase': config.get('retry_delay_increase', 5), - 'convertTo': config.get('convertTo', None), - 'bitrate': config.get('bitrate', None) + "spotify": config.get("spotify", ""), + "deezer": config.get("deezer", ""), + "fallback": config.get("fallback", False), + "spotifyQuality": config.get("spotifyQuality", "NORMAL"), + "deezerQuality": config.get("deezerQuality", "MP3_128"), + "realTime": config.get("realTime", False), + "customDirFormat": config.get("customDirFormat", "%ar_album%/%album%"), + "customTrackFormat": config.get("customTrackFormat", "%tracknum%. %music%"), + "tracknum_padding": config.get("tracknum_padding", True), + "save_cover": config.get("save_cover", True), + "maxRetries": config.get("maxRetries", 3), + "retryDelaySeconds": config.get("retryDelaySeconds", 5), + "retry_delay_increase": config.get("retry_delay_increase", 5), + "convertTo": config.get("convertTo", None), + "bitrate": config.get("bitrate", None), } except Exception as e: logger.error(f"Error reading config for parameters: {e}") # Return defaults if config read fails return { - 'spotify': '', - 'deezer': '', - 'fallback': False, - 'spotifyQuality': 'NORMAL', - 'deezerQuality': 'MP3_128', - 'realTime': False, - 'customDirFormat': '%ar_album%/%album%', - 'customTrackFormat': '%tracknum%. %music%', - 'tracknum_padding': True, - 'save_cover': True, - 'maxRetries': 3, - 'retryDelaySeconds': 5, - 'retry_delay_increase': 5, - 'convertTo': None, # Default for conversion - 'bitrate': None # Default for bitrate + "spotify": "", + "deezer": "", + "fallback": False, + "spotifyQuality": "NORMAL", + "deezerQuality": "MP3_128", + "realTime": False, + "customDirFormat": "%ar_album%/%album%", + "customTrackFormat": "%tracknum%. %music%", + "tracknum_padding": True, + "save_cover": True, + "maxRetries": 3, + "retryDelaySeconds": 5, + "retry_delay_increase": 5, + "convertTo": None, # Default for conversion + "bitrate": None, # Default for bitrate } + class CeleryDownloadQueueManager: """ Manages a queue of download tasks using Celery. This is a drop-in replacement for the previous DownloadQueueManager. - + Instead of using file-based progress tracking, it uses Redis via Celery for task management and progress tracking. """ - + def __init__(self): """Initialize the Celery-based download queue manager""" self.max_concurrent = MAX_CONCURRENT_DL self.paused = False - print(f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}") - + print( + f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}" + ) + def add_task(self, task: dict, from_watch_job: bool = False): """ Add a new download task to the Celery queue. - If from_watch_job is True and an active duplicate is found, the task is not queued and None is returned. - If from_watch_job is False and an active duplicate is found, a new task ID is created, set to an ERROR state indicating the duplicate, and this new error task's ID is returned. - + Args: task (dict): Task parameters including download_type, url, etc. from_watch_job (bool): If True, duplicate active tasks are skipped. Defaults to False. - + Returns: str | None: Task ID if successfully queued or an error task ID for non-watch duplicates. None if from_watch_job is True and an active duplicate was found. 
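The duplicate check described in the docstring above boils down to scanning the known tasks for one with the same URL and download type whose status is still active. A minimal sketch of that idea, using plain-dict task summaries in place of the Redis-backed get_all_tasks/get_task_info helpers (find_active_duplicate and its parameters are illustrative names, not the module's API):

def find_active_duplicate(tasks, url, download_type, non_blocking_states):
    # Return the task_id of an active task with the same URL and type, else None.
    for summary in tasks:
        if (
            summary.get("url") == url
            and summary.get("download_type") == download_type
            and summary.get("status") not in non_blocking_states
        ):
            return summary.get("task_id")
    return None

# Example: a finished task does not block a re-download, a processing one does.
tasks = [
    {"task_id": "a1", "url": "u1", "download_type": "album", "status": "complete"},
    {"task_id": "b2", "url": "u1", "download_type": "album", "status": "processing"},
]
assert find_active_duplicate(tasks, "u1", "album", {"complete", "cancelled"}) == "b2"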
@@ -121,16 +121,18 @@ class CeleryDownloadQueueManager: incoming_type = task.get("download_type", "unknown") if not incoming_url: - logger.warning("Task being added with no URL. Duplicate check might be unreliable.") + logger.warning( + "Task being added with no URL. Duplicate check might be unreliable." + ) NON_BLOCKING_STATES = [ ProgressState.COMPLETE, ProgressState.CANCELLED, - ProgressState.ERROR, + ProgressState.ERROR, ProgressState.ERROR_RETRIED, - ProgressState.ERROR_AUTO_CLEANED + ProgressState.ERROR_AUTO_CLEANED, ] - + all_existing_tasks_summary = get_all_tasks() if incoming_url: for task_summary in all_existing_tasks_summary: @@ -143,21 +145,24 @@ class CeleryDownloadQueueManager: if not existing_task_info or not existing_last_status_obj: continue - + existing_url = existing_task_info.get("url") existing_type = existing_task_info.get("download_type") existing_status = existing_last_status_obj.get("status") - if (existing_url == incoming_url and - existing_type == incoming_type and - existing_status not in NON_BLOCKING_STATES): - + if ( + existing_url == incoming_url + and existing_type == incoming_type + and existing_status not in NON_BLOCKING_STATES + ): message = f"Duplicate download: URL '{incoming_url}' (type: {incoming_type}) is already being processed by task {existing_task_id} (status: {existing_status})." logger.warning(message) - + if from_watch_job: - logger.info(f"Task from watch job for {incoming_url} not queued due to active duplicate {existing_task_id}.") - return None # Skip execution for watch jobs + logger.info( + f"Task from watch job for {incoming_url} not queued due to active duplicate {existing_task_id}." + ) + return None # Skip execution for watch jobs else: # Create a new task_id for this duplicate request and mark it as an error error_task_id = str(uuid.uuid4()) @@ -167,27 +172,31 @@ class CeleryDownloadQueueManager: "name": task.get("name", "Duplicate Task"), "artist": task.get("artist", ""), "url": incoming_url, - "original_request": task.get("orig_request", task.get("original_request", {})), + "original_request": task.get( + "orig_request", task.get("original_request", {}) + ), "created_at": time.time(), - "is_duplicate_error_task": True + "is_duplicate_error_task": True, } store_task_info(error_task_id, error_task_info_payload) error_status_payload = { "status": ProgressState.ERROR, "error": message, - "existing_task_id": existing_task_id, + "existing_task_id": existing_task_id, "timestamp": time.time(), "type": error_task_info_payload["type"], "name": error_task_info_payload["name"], - "artist": error_task_info_payload["artist"] + "artist": error_task_info_payload["artist"], } store_task_status(error_task_id, error_status_payload) - return error_task_id # Return the ID of this new error-state task + return error_task_id # Return the ID of this new error-state task task_id = str(uuid.uuid4()) config_params = get_config_params() - original_request = task.get("orig_request", task.get("original_request", {})) - + original_request = task.get( + "orig_request", task.get("original_request", {}) + ) + complete_task = { "download_type": incoming_type, "type": task.get("type", incoming_type), @@ -195,75 +204,107 @@ class CeleryDownloadQueueManager: "artist": task.get("artist", ""), "url": task.get("url", ""), "retry_url": task.get("retry_url", ""), - "main": original_request.get("main", config_params['deezer']), - "fallback": original_request.get("fallback", - config_params['spotify'] if config_params['fallback'] else None), - "quality": 
original_request.get("quality", config_params['deezerQuality']), - "fall_quality": original_request.get("fall_quality", config_params['spotifyQuality']), - "real_time": self._parse_bool_param(original_request.get("real_time"), config_params['realTime']), - "custom_dir_format": original_request.get("custom_dir_format", config_params['customDirFormat']), - "custom_track_format": original_request.get("custom_track_format", config_params['customTrackFormat']), - "pad_tracks": self._parse_bool_param(original_request.get("tracknum_padding"), config_params['tracknum_padding']), - "save_cover": self._parse_bool_param(original_request.get("save_cover"), config_params['save_cover']), - "convertTo": original_request.get("convertTo", config_params.get('convertTo')), - "bitrate": original_request.get("bitrate", config_params.get('bitrate')), + "main": original_request.get("main", config_params["deezer"]), + "fallback": original_request.get( + "fallback", + config_params["spotify"] if config_params["fallback"] else None, + ), + "quality": original_request.get( + "quality", config_params["deezerQuality"] + ), + "fall_quality": original_request.get( + "fall_quality", config_params["spotifyQuality"] + ), + "real_time": self._parse_bool_param( + original_request.get("real_time"), config_params["realTime"] + ), + "custom_dir_format": original_request.get( + "custom_dir_format", config_params["customDirFormat"] + ), + "custom_track_format": original_request.get( + "custom_track_format", config_params["customTrackFormat"] + ), + "pad_tracks": self._parse_bool_param( + original_request.get("tracknum_padding"), + config_params["tracknum_padding"], + ), + "save_cover": self._parse_bool_param( + original_request.get("save_cover"), config_params["save_cover"] + ), + "convertTo": original_request.get( + "convertTo", config_params.get("convertTo") + ), + "bitrate": original_request.get( + "bitrate", config_params.get("bitrate") + ), "retry_count": 0, "original_request": original_request, - "created_at": time.time() + "created_at": time.time(), } # If from_watch_job is True, ensure track_details_for_db is passed through if from_watch_job and "track_details_for_db" in task: complete_task["track_details_for_db"] = task["track_details_for_db"] - + store_task_info(task_id, complete_task) - store_task_status(task_id, { - "status": ProgressState.QUEUED, - "timestamp": time.time(), - "type": complete_task["type"], - "name": complete_task["name"], - "artist": complete_task["artist"], - "retry_count": 0, - "queue_position": len(get_all_tasks()) + 1 - }) - + store_task_status( + task_id, + { + "status": ProgressState.QUEUED, + "timestamp": time.time(), + "type": complete_task["type"], + "name": complete_task["name"], + "artist": complete_task["artist"], + "retry_count": 0, + "queue_position": len(get_all_tasks()) + 1, + }, + ) + celery_task_map = { "track": download_track, "album": download_album, - "playlist": download_playlist + "playlist": download_playlist, } - + task_func = celery_task_map.get(incoming_type) if task_func: task_func.apply_async( kwargs=complete_task, task_id=task_id, - countdown=0 if not self.paused else 3600 + countdown=0 if not self.paused else 3600, + ) + logger.info( + f"Added {incoming_type} download task {task_id} to Celery queue." 
) - logger.info(f"Added {incoming_type} download task {task_id} to Celery queue.") return task_id else: - store_task_status(task_id, { - "status": ProgressState.ERROR, - "message": f"Unsupported download type: {incoming_type}", - "timestamp": time.time() - }) + store_task_status( + task_id, + { + "status": ProgressState.ERROR, + "message": f"Unsupported download type: {incoming_type}", + "timestamp": time.time(), + }, + ) logger.error(f"Unsupported download type: {incoming_type}") return task_id - + except Exception as e: logger.error(f"Error adding task to Celery queue: {e}", exc_info=True) error_task_id = str(uuid.uuid4()) - store_task_status(error_task_id, { - "status": ProgressState.ERROR, - "message": f"Error adding task to queue: {str(e)}", - "timestamp": time.time(), - "type": task.get("type", "unknown"), - "name": task.get("name", "Unknown"), - "artist": task.get("artist", "") - }) + store_task_status( + error_task_id, + { + "status": ProgressState.ERROR, + "message": f"Error adding task to queue: {str(e)}", + "timestamp": time.time(), + "type": task.get("type", "unknown"), + "name": task.get("name", "Unknown"), + "artist": task.get("artist", ""), + }, + ) return error_task_id - + def _parse_bool_param(self, param_value, default_value=False): """Helper function to parse boolean parameters from string values""" if param_value is None: @@ -271,108 +312,111 @@ class CeleryDownloadQueueManager: if isinstance(param_value, bool): return param_value if isinstance(param_value, str): - return param_value.lower() in ['true', '1', 'yes', 'y', 'on'] + return param_value.lower() in ["true", "1", "yes", "y", "on"] return bool(param_value) - + def cancel_task(self, task_id): """ Cancels a task by its ID. - + Args: task_id (str): The ID of the task to cancel - + Returns: dict: Status information about the cancellation """ return cancel_celery_task(task_id) - + def retry_task(self, task_id): """ Retry a failed task. - + Args: task_id (str): The ID of the failed task to retry - + Returns: dict: Status information about the retry """ return retry_celery_task(task_id) - + def cancel_all_tasks(self): """ Cancel all currently queued and running tasks. - + Returns: dict: Status information about the cancellation """ tasks = get_all_tasks() cancelled_count = 0 - + for task in tasks: task_id = task.get("task_id") status = task.get("status") - + # Only cancel tasks that are not already completed or cancelled if status not in [ProgressState.COMPLETE, ProgressState.CANCELLED]: result = cancel_celery_task(task_id) if result.get("status") == "cancelled": cancelled_count += 1 - + return { "status": "all_cancelled", "cancelled_count": cancelled_count, - "total_tasks": len(tasks) + "total_tasks": len(tasks), } - + def get_queue_status(self): """ Get the current status of the queue. 
- + Returns: dict: Status information about the queue """ tasks = get_all_tasks() - + # Count tasks by status running_count = 0 pending_count = 0 failed_count = 0 - + running_tasks = [] failed_tasks = [] - + for task in tasks: status = task.get("status") - + if status == ProgressState.PROCESSING: running_count += 1 - running_tasks.append({ - "task_id": task.get("task_id"), - "name": task.get("name", "Unknown"), - "type": task.get("type", "unknown"), - "download_type": task.get("download_type", "unknown") - }) + running_tasks.append( + { + "task_id": task.get("task_id"), + "name": task.get("name", "Unknown"), + "type": task.get("type", "unknown"), + "download_type": task.get("download_type", "unknown"), + } + ) elif status == ProgressState.QUEUED: pending_count += 1 elif status == ProgressState.ERROR: failed_count += 1 - + # Get task info for retry information - task_info = get_task_info(task.get("task_id")) last_status = get_last_task_status(task.get("task_id")) - + retry_count = 0 if last_status: retry_count = last_status.get("retry_count", 0) - - failed_tasks.append({ - "task_id": task.get("task_id"), - "name": task.get("name", "Unknown"), - "type": task.get("type", "unknown"), - "download_type": task.get("download_type", "unknown"), - "retry_count": retry_count - }) - + + failed_tasks.append( + { + "task_id": task.get("task_id"), + "name": task.get("name", "Unknown"), + "type": task.get("type", "unknown"), + "download_type": task.get("download_type", "unknown"), + "retry_count": retry_count, + } + ) + return { "running": running_count, "pending": pending_count, @@ -380,87 +424,85 @@ class CeleryDownloadQueueManager: "max_concurrent": self.max_concurrent, "paused": self.paused, "running_tasks": running_tasks, - "failed_tasks": failed_tasks + "failed_tasks": failed_tasks, } - + def pause(self): """Pause processing of new tasks.""" self.paused = True - + # Get all queued tasks tasks = get_all_tasks() for task in tasks: if task.get("status") == ProgressState.QUEUED: # Update status to indicate the task is paused - store_task_status(task.get("task_id"), { - "status": ProgressState.QUEUED, - "paused": True, - "message": "Queue is paused, task will run when queue is resumed", - "timestamp": time.time() - }) - + store_task_status( + task.get("task_id"), + { + "status": ProgressState.QUEUED, + "paused": True, + "message": "Queue is paused, task will run when queue is resumed", + "timestamp": time.time(), + }, + ) + logger.info("Download queue processing paused") return {"status": "paused"} - + def resume(self): """Resume processing of tasks.""" self.paused = False - + # Get all queued tasks tasks = get_all_tasks() for task in tasks: if task.get("status") == ProgressState.QUEUED: task_id = task.get("task_id") - + # Get the task info task_info = get_task_info(task_id) if not task_info: continue - + # Update status to indicate the task is no longer paused - store_task_status(task_id, { - "status": ProgressState.QUEUED, - "paused": False, - "message": "Queue resumed, task will run soon", - "timestamp": time.time() - }) - + store_task_status( + task_id, + { + "status": ProgressState.QUEUED, + "paused": False, + "message": "Queue resumed, task will run soon", + "timestamp": time.time(), + }, + ) + # Reschedule the task to run immediately download_type = task_info.get("download_type", "unknown") - + if download_type == "track": - download_track.apply_async( - kwargs=task_info, - task_id=task_id - ) + download_track.apply_async(kwargs=task_info, task_id=task_id) elif download_type == "album": - 
download_album.apply_async( - kwargs=task_info, - task_id=task_id - ) + download_album.apply_async(kwargs=task_info, task_id=task_id) elif download_type == "playlist": - download_playlist.apply_async( - kwargs=task_info, - task_id=task_id - ) - + download_playlist.apply_async(kwargs=task_info, task_id=task_id) + logger.info("Download queue processing resumed") return {"status": "resumed"} - + def start(self): """Start the queue manager (no-op for Celery implementation).""" logger.info("Celery Download Queue Manager started") return {"status": "started"} - + def stop(self): """Stop the queue manager (graceful shutdown).""" logger.info("Celery Download Queue Manager stopping...") - + # Cancel all tasks or just let them finish? # For now, we'll let them finish and just log the shutdown - + logger.info("Celery Download Queue Manager stopped") return {"status": "stopped"} + # Create the global instance -download_queue_manager = CeleryDownloadQueueManager() \ No newline at end of file +download_queue_manager = CeleryDownloadQueueManager() diff --git a/routes/utils/celery_tasks.py b/routes/utils/celery_tasks.py index 7db9635..155ba30 100644 --- a/routes/utils/celery_tasks.py +++ b/routes/utils/celery_tasks.py @@ -1,9 +1,7 @@ import time import json -import uuid import logging import traceback -from datetime import datetime from celery import Celery, Task, states from celery.signals import ( task_prerun, @@ -14,17 +12,13 @@ from celery.signals import ( setup_logging, ) from celery.exceptions import Retry -import os # Added for path operations from pathlib import Path # Added for path operations -# Configure logging -logger = logging.getLogger(__name__) # Setup Redis and Celery from routes.utils.celery_config import ( REDIS_URL, REDIS_BACKEND, - REDIS_PASSWORD, get_config_params, ) @@ -37,6 +31,12 @@ from routes.utils.watch.db import ( # Import history manager function from .history_manager import add_entry_to_history +# Create Redis connection for storing task data that's not part of the Celery result backend +import redis + +# Configure logging +logger = logging.getLogger(__name__) + # Initialize Celery app celery_app = Celery( "routes.utils.celery_tasks", broker=REDIS_URL, backend=REDIS_BACKEND @@ -45,8 +45,6 @@ celery_app = Celery( # Load Celery config celery_app.config_from_object("routes.utils.celery_config") -# Create Redis connection for storing task data that's not part of the Celery result backend -import redis redis_client = redis.Redis.from_url(REDIS_URL) diff --git a/routes/utils/credentials.py b/routes/utils/credentials.py index 1a6650d..23a5cef 100755 --- a/routes/utils/credentials.py +++ b/routes/utils/credentials.py @@ -2,8 +2,7 @@ import json from pathlib import Path import shutil import sqlite3 -import traceback # For logging detailed error messages -import time # For retry delays +import time # For retry delays import logging # Assuming deezspot is in a location findable by Python's import system @@ -11,38 +10,42 @@ import logging # from deezspot.deezloader import DeeLogin # Used in validation # For now, as per original, validation calls these directly. 
-logger = logging.getLogger(__name__) # Assuming logger is configured elsewhere +logger = logging.getLogger(__name__) # Assuming logger is configured elsewhere # --- New Database and Path Definitions --- -CREDS_BASE_DIR = Path('./data/creds') -ACCOUNTS_DB_PATH = CREDS_BASE_DIR / 'accounts.db' -BLOBS_DIR = CREDS_BASE_DIR / 'blobs' -GLOBAL_SEARCH_JSON_PATH = CREDS_BASE_DIR / 'search.json' # Global Spotify API creds +CREDS_BASE_DIR = Path("./data/creds") +ACCOUNTS_DB_PATH = CREDS_BASE_DIR / "accounts.db" +BLOBS_DIR = CREDS_BASE_DIR / "blobs" +GLOBAL_SEARCH_JSON_PATH = CREDS_BASE_DIR / "search.json" # Global Spotify API creds EXPECTED_SPOTIFY_TABLE_COLUMNS = { "name": "TEXT PRIMARY KEY", # client_id and client_secret are now global - "region": "TEXT", # ISO 3166-1 alpha-2 + "region": "TEXT", # ISO 3166-1 alpha-2 "created_at": "REAL", - "updated_at": "REAL" + "updated_at": "REAL", } EXPECTED_DEEZER_TABLE_COLUMNS = { "name": "TEXT PRIMARY KEY", "arl": "TEXT", - "region": "TEXT", # ISO 3166-1 alpha-2 + "region": "TEXT", # ISO 3166-1 alpha-2 "created_at": "REAL", - "updated_at": "REAL" + "updated_at": "REAL", } + def _get_db_connection(): ACCOUNTS_DB_PATH.parent.mkdir(parents=True, exist_ok=True) - BLOBS_DIR.mkdir(parents=True, exist_ok=True) # Ensure blobs directory also exists + BLOBS_DIR.mkdir(parents=True, exist_ok=True) # Ensure blobs directory also exists conn = sqlite3.connect(ACCOUNTS_DB_PATH, timeout=10) conn.row_factory = sqlite3.Row return conn -def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_columns: dict): + +def _ensure_table_schema( + cursor: sqlite3.Cursor, table_name: str, expected_columns: dict +): """Ensures the given table has all expected columns, adding them if necessary.""" try: cursor.execute(f"PRAGMA table_info({table_name})") @@ -53,17 +56,21 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum for col_name, col_type in expected_columns.items(): if col_name not in existing_column_names: # Basic protection against altering PK after creation if table is not empty - if 'PRIMARY KEY' in col_type.upper() and existing_columns_info: + if "PRIMARY KEY" in col_type.upper() and existing_columns_info: logger.warning( f"Column '{col_name}' is part of PRIMARY KEY for table '{table_name}' " f"and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN." ) continue - col_type_for_add = col_type.replace(' PRIMARY KEY', '').strip() + col_type_for_add = col_type.replace(" PRIMARY KEY", "").strip() try: - cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_for_add}") - logger.info(f"Added missing column '{col_name} {col_type_for_add}' to table '{table_name}'.") + cursor.execute( + f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_for_add}" + ) + logger.info( + f"Added missing column '{col_name} {col_type_for_add}' to table '{table_name}'." 
+ ) added_columns = True except sqlite3.OperationalError as alter_e: logger.warning( @@ -72,9 +79,12 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum ) return added_columns except sqlite3.Error as e: - logger.error(f"Error ensuring schema for table '{table_name}': {e}", exc_info=True) + logger.error( + f"Error ensuring schema for table '{table_name}': {e}", exc_info=True + ) return False + def init_credentials_db(): """Initializes the accounts.db and its tables if they don't exist.""" try: @@ -90,7 +100,7 @@ def init_credentials_db(): ) """) _ensure_table_schema(cursor, "spotify", EXPECTED_SPOTIFY_TABLE_COLUMNS) - + # Deezer Table cursor.execute(""" CREATE TABLE IF NOT EXISTS deezer ( @@ -102,49 +112,75 @@ def init_credentials_db(): ) """) _ensure_table_schema(cursor, "deezer", EXPECTED_DEEZER_TABLE_COLUMNS) - + # Ensure global search.json exists, create if not if not GLOBAL_SEARCH_JSON_PATH.exists(): - logger.info(f"Global Spotify search credential file not found at {GLOBAL_SEARCH_JSON_PATH}. Creating empty file.") - with open(GLOBAL_SEARCH_JSON_PATH, 'w') as f_search: - json.dump({"client_id": "", "client_secret": ""}, f_search, indent=4) + logger.info( + f"Global Spotify search credential file not found at {GLOBAL_SEARCH_JSON_PATH}. Creating empty file." + ) + with open(GLOBAL_SEARCH_JSON_PATH, "w") as f_search: + json.dump( + {"client_id": "", "client_secret": ""}, f_search, indent=4 + ) conn.commit() - logger.info(f"Credentials database initialized/schema checked at {ACCOUNTS_DB_PATH}") + logger.info( + f"Credentials database initialized/schema checked at {ACCOUNTS_DB_PATH}" + ) except sqlite3.Error as e: logger.error(f"Error initializing credentials database: {e}", exc_info=True) raise + def _get_global_spotify_api_creds(): """Loads client_id and client_secret from the global search.json.""" if GLOBAL_SEARCH_JSON_PATH.exists(): try: - with open(GLOBAL_SEARCH_JSON_PATH, 'r') as f: + with open(GLOBAL_SEARCH_JSON_PATH, "r") as f: search_data = json.load(f) - client_id = search_data.get('client_id') - client_secret = search_data.get('client_secret') + client_id = search_data.get("client_id") + client_secret = search_data.get("client_secret") if client_id and client_secret: return client_id, client_secret else: - logger.warning(f"Global Spotify API credentials in {GLOBAL_SEARCH_JSON_PATH} are incomplete.") + logger.warning( + f"Global Spotify API credentials in {GLOBAL_SEARCH_JSON_PATH} are incomplete." + ) except Exception as e: - logger.error(f"Error reading global Spotify API credentials from {GLOBAL_SEARCH_JSON_PATH}: {e}", exc_info=True) + logger.error( + f"Error reading global Spotify API credentials from {GLOBAL_SEARCH_JSON_PATH}: {e}", + exc_info=True, + ) else: - logger.warning(f"Global Spotify API credential file {GLOBAL_SEARCH_JSON_PATH} not found.") - return None, None # Return None if file doesn't exist or creds are incomplete/invalid + logger.warning( + f"Global Spotify API credential file {GLOBAL_SEARCH_JSON_PATH} not found." 
+ ) + return ( + None, + None, + ) # Return None if file doesn't exist or creds are incomplete/invalid + def save_global_spotify_api_creds(client_id: str, client_secret: str): """Saves client_id and client_secret to the global search.json.""" try: GLOBAL_SEARCH_JSON_PATH.parent.mkdir(parents=True, exist_ok=True) - with open(GLOBAL_SEARCH_JSON_PATH, 'w') as f: - json.dump({"client_id": client_id, "client_secret": client_secret}, f, indent=4) - logger.info(f"Global Spotify API credentials saved to {GLOBAL_SEARCH_JSON_PATH}") + with open(GLOBAL_SEARCH_JSON_PATH, "w") as f: + json.dump( + {"client_id": client_id, "client_secret": client_secret}, f, indent=4 + ) + logger.info( + f"Global Spotify API credentials saved to {GLOBAL_SEARCH_JSON_PATH}" + ) return True except Exception as e: - logger.error(f"Error saving global Spotify API credentials to {GLOBAL_SEARCH_JSON_PATH}: {e}", exc_info=True) + logger.error( + f"Error saving global Spotify API credentials to {GLOBAL_SEARCH_JSON_PATH}: {e}", + exc_info=True, + ) return False + def _validate_with_retry(service_name, account_name, validation_data): """ Attempts to validate credentials with retries for connection errors. @@ -153,59 +189,84 @@ def _validate_with_retry(service_name, account_name, validation_data): Returns True if validated, raises ValueError if not. """ # Deezspot imports need to be available. Assuming they are. - from deezspot.spotloader import SpoLogin + from deezspot.spotloader import SpoLogin from deezspot.deezloader import DeeLogin - max_retries = 3 # Reduced for brevity, was 5 + max_retries = 3 # Reduced for brevity, was 5 last_exception = None for attempt in range(max_retries): try: - if service_name == 'spotify': + if service_name == "spotify": # For Spotify, validation uses the account's blob and GLOBAL API creds global_client_id, global_client_secret = _get_global_spotify_api_creds() if not global_client_id or not global_client_secret: - raise ValueError("Global Spotify API client_id or client_secret not configured for validation.") - - blob_file_path = validation_data.get('blob_file_path') + raise ValueError( + "Global Spotify API client_id or client_secret not configured for validation." + ) + + blob_file_path = validation_data.get("blob_file_path") if not blob_file_path or not Path(blob_file_path).exists(): - raise ValueError(f"Spotify blob file missing for validation of account {account_name}") - SpoLogin(credentials_path=str(blob_file_path), spotify_client_id=global_client_id, spotify_client_secret=global_client_secret) - else: # Deezer - arl = validation_data.get('arl') + raise ValueError( + f"Spotify blob file missing for validation of account {account_name}" + ) + SpoLogin( + credentials_path=str(blob_file_path), + spotify_client_id=global_client_id, + spotify_client_secret=global_client_secret, + ) + else: # Deezer + arl = validation_data.get("arl") if not arl: raise ValueError("Missing 'arl' for Deezer validation.") DeeLogin(arl=arl) - - logger.info(f"{service_name.capitalize()} credentials for {account_name} validated successfully (attempt {attempt + 1}).") + + logger.info( + f"{service_name.capitalize()} credentials for {account_name} validated successfully (attempt {attempt + 1})." 
+ ) return True except Exception as e: last_exception = e error_str = str(e).lower() is_connection_error = ( - "connection refused" in error_str or "connection error" in error_str or - "timeout" in error_str or "temporary failure in name resolution" in error_str or - "dns lookup failed" in error_str or "network is unreachable" in error_str or - "ssl handshake failed" in error_str or "connection reset by peer" in error_str + "connection refused" in error_str + or "connection error" in error_str + or "timeout" in error_str + or "temporary failure in name resolution" in error_str + or "dns lookup failed" in error_str + or "network is unreachable" in error_str + or "ssl handshake failed" in error_str + or "connection reset by peer" in error_str ) if is_connection_error and attempt < max_retries - 1: - retry_delay = 2 + attempt - logger.warning(f"Validation for {account_name} ({service_name}) failed (attempt {attempt + 1}) due to connection issue: {e}. Retrying in {retry_delay}s...") + retry_delay = 2 + attempt + logger.warning( + f"Validation for {account_name} ({service_name}) failed (attempt {attempt + 1}) due to connection issue: {e}. Retrying in {retry_delay}s..." + ) time.sleep(retry_delay) continue else: - logger.error(f"Validation for {account_name} ({service_name}) failed on attempt {attempt + 1} (non-retryable or max retries).") + logger.error( + f"Validation for {account_name} ({service_name}) failed on attempt {attempt + 1} (non-retryable or max retries)." + ) break if last_exception: base_error_message = str(last_exception).splitlines()[-1] detailed_error_message = f"Invalid {service_name} credentials for {account_name}. Verification failed: {base_error_message}" - if service_name == 'spotify' and "incorrect padding" in base_error_message.lower(): - detailed_error_message += ". Hint: For Spotify, ensure the credentials blob content is correct." + if ( + service_name == "spotify" + and "incorrect padding" in base_error_message.lower() + ): + detailed_error_message += ( + ". Hint: For Spotify, ensure the credentials blob content is correct." + ) raise ValueError(detailed_error_message) else: - raise ValueError(f"Invalid {service_name} credentials for {account_name}. Verification failed (unknown reason after retries).") + raise ValueError( + f"Invalid {service_name} credentials for {account_name}. Verification failed (unknown reason after retries)." + ) def create_credential(service, name, data): @@ -219,54 +280,67 @@ def create_credential(service, name, data): Raises: ValueError, FileExistsError """ - if service not in ['spotify', 'deezer']: + if service not in ["spotify", "deezer"]: raise ValueError("Service must be 'spotify' or 'deezer'") if not name or not isinstance(name, str): raise ValueError("Credential name must be a non-empty string.") current_time = time.time() - + with _get_db_connection() as conn: cursor = conn.cursor() conn.row_factory = sqlite3.Row try: - if service == 'spotify': - required_fields = {'region', 'blob_content'} # client_id/secret are global + if service == "spotify": + required_fields = { + "region", + "blob_content", + } # client_id/secret are global if not required_fields.issubset(data.keys()): - raise ValueError(f"Missing fields for Spotify. Required: {required_fields}") + raise ValueError( + f"Missing fields for Spotify. 
Required: {required_fields}" + ) + + blob_path = BLOBS_DIR / name / "credentials.json" + validation_data = { + "blob_file_path": str(blob_path) + } # Validation uses global API creds - blob_path = BLOBS_DIR / name / 'credentials.json' - validation_data = {'blob_file_path': str(blob_path)} # Validation uses global API creds - blob_path.parent.mkdir(parents=True, exist_ok=True) - with open(blob_path, 'w') as f_blob: - if isinstance(data['blob_content'], dict): - json.dump(data['blob_content'], f_blob, indent=4) - else: # assume string - f_blob.write(data['blob_content']) - + with open(blob_path, "w") as f_blob: + if isinstance(data["blob_content"], dict): + json.dump(data["blob_content"], f_blob, indent=4) + else: # assume string + f_blob.write(data["blob_content"]) + try: - _validate_with_retry('spotify', name, validation_data) + _validate_with_retry("spotify", name, validation_data) cursor.execute( "INSERT INTO spotify (name, region, created_at, updated_at) VALUES (?, ?, ?, ?)", - (name, data['region'], current_time, current_time) + (name, data["region"], current_time, current_time), ) - except Exception as e: - if blob_path.exists(): blob_path.unlink() # Cleanup blob - if blob_path.parent.exists() and not any(blob_path.parent.iterdir()): blob_path.parent.rmdir() - raise # Re-raise validation or DB error - - elif service == 'deezer': - required_fields = {'arl', 'region'} + except Exception: + if blob_path.exists(): + blob_path.unlink() # Cleanup blob + if blob_path.parent.exists() and not any( + blob_path.parent.iterdir() + ): + blob_path.parent.rmdir() + raise # Re-raise validation or DB error + + elif service == "deezer": + required_fields = {"arl", "region"} if not required_fields.issubset(data.keys()): - raise ValueError(f"Missing fields for Deezer. Required: {required_fields}") - - validation_data = {'arl': data['arl']} - _validate_with_retry('deezer', name, validation_data) - + raise ValueError( + f"Missing fields for Deezer. Required: {required_fields}" + ) + + validation_data = {"arl": data["arl"]} + _validate_with_retry("deezer", name, validation_data) + cursor.execute( "INSERT INTO deezer (name, arl, region, created_at, updated_at) VALUES (?, ?, ?, ?, ?)", - (name, data['arl'], data['region'], current_time, current_time) + (name, data["arl"], data["region"], current_time, current_time), ) conn.commit() logger.info(f"Credential '{name}' for {service} created successfully.") @@ -274,7 +348,9 @@ def create_credential(service, name, data): except sqlite3.IntegrityError: raise FileExistsError(f"Credential '{name}' already exists for {service}.") except Exception as e: - logger.error(f"Error creating credential {name} for {service}: {e}", exc_info=True) + logger.error( + f"Error creating credential {name} for {service}: {e}", exc_info=True + ) raise ValueError(f"Could not create credential: {e}") @@ -285,12 +361,12 @@ def get_credential(service, name): For Deezer, returns dict with name, arl, and region. Raises FileNotFoundError if the credential does not exist. 
""" - if service not in ['spotify', 'deezer']: + if service not in ["spotify", "deezer"]: raise ValueError("Service must be 'spotify' or 'deezer'") - + with _get_db_connection() as conn: cursor = conn.cursor() - conn.row_factory = sqlite3.Row # Ensure row_factory is set for this cursor + conn.row_factory = sqlite3.Row # Ensure row_factory is set for this cursor cursor.execute(f"SELECT * FROM {service} WHERE name = ?", (name,)) row = cursor.fetchone() @@ -299,63 +375,72 @@ def get_credential(service, name): data = dict(row) - if service == 'spotify': - blob_file_path = BLOBS_DIR / name / 'credentials.json' - data['blob_file_path'] = str(blob_file_path) # Keep for internal use + if service == "spotify": + blob_file_path = BLOBS_DIR / name / "credentials.json" + data["blob_file_path"] = str(blob_file_path) # Keep for internal use try: - with open(blob_file_path, 'r') as f_blob: + with open(blob_file_path, "r") as f_blob: blob_data = json.load(f_blob) - data['blob_content'] = blob_data + data["blob_content"] = blob_data except FileNotFoundError: - logger.warning(f"Spotify blob file not found for {name} at {blob_file_path} during get_credential.") - data['blob_content'] = None + logger.warning( + f"Spotify blob file not found for {name} at {blob_file_path} during get_credential." + ) + data["blob_content"] = None except json.JSONDecodeError: - logger.warning(f"Error decoding JSON from Spotify blob file for {name} at {blob_file_path}.") - data['blob_content'] = None + logger.warning( + f"Error decoding JSON from Spotify blob file for {name} at {blob_file_path}." + ) + data["blob_content"] = None except Exception as e: - logger.error(f"Unexpected error reading Spotify blob for {name}: {e}", exc_info=True) - data['blob_content'] = None - + logger.error( + f"Unexpected error reading Spotify blob for {name}: {e}", + exc_info=True, + ) + data["blob_content"] = None + cleaned_data = { - 'name': data.get('name'), - 'region': data.get('region'), - 'blob_content': data.get('blob_content') + "name": data.get("name"), + "region": data.get("region"), + "blob_content": data.get("blob_content"), } return cleaned_data - - elif service == 'deezer': + + elif service == "deezer": cleaned_data = { - 'name': data.get('name'), - 'region': data.get('region'), - 'arl': data.get('arl') + "name": data.get("name"), + "region": data.get("region"), + "arl": data.get("arl"), } return cleaned_data - + # Fallback, should not be reached if service is spotify or deezer return None + def list_credentials(service): - if service not in ['spotify', 'deezer']: + if service not in ["spotify", "deezer"]: raise ValueError("Service must be 'spotify' or 'deezer'") - + with _get_db_connection() as conn: cursor = conn.cursor() conn.row_factory = sqlite3.Row cursor.execute(f"SELECT name FROM {service}") - return [row['name'] for row in cursor.fetchall()] + return [row["name"] for row in cursor.fetchall()] + def delete_credential(service, name): - if service not in ['spotify', 'deezer']: + if service not in ["spotify", "deezer"]: raise ValueError("Service must be 'spotify' or 'deezer'") - + with _get_db_connection() as conn: cursor = conn.cursor() conn.row_factory = sqlite3.Row cursor.execute(f"DELETE FROM {service} WHERE name = ?", (name,)) if cursor.rowcount == 0: raise FileNotFoundError(f"Credential '{name}' not found for {service}.") - - if service == 'spotify': + + if service == "spotify": blob_dir = BLOBS_DIR / name if blob_dir.exists(): shutil.rmtree(blob_dir) @@ -363,6 +448,7 @@ def delete_credential(service, name): 
logger.info(f"Credential '{name}' for {service} deleted.") return {"status": "deleted", "service": service, "name": name} + def edit_credential(service, name, new_data): """ Edits an existing credential. @@ -370,98 +456,125 @@ def edit_credential(service, name, new_data): new_data for Deezer can include: arl, region. Fields not in new_data remain unchanged. """ - if service not in ['spotify', 'deezer']: + if service not in ["spotify", "deezer"]: raise ValueError("Service must be 'spotify' or 'deezer'") - + current_time = time.time() - + # Fetch existing data first to preserve unchanged fields and for validation backup try: - existing_cred = get_credential(service, name) # This will raise FileNotFoundError if not found + existing_cred = get_credential( + service, name + ) # This will raise FileNotFoundError if not found except FileNotFoundError: raise - except Exception as e: # Catch other errors from get_credential + except Exception as e: # Catch other errors from get_credential raise ValueError(f"Could not retrieve existing credential {name} for edit: {e}") updated_fields = new_data.copy() - + with _get_db_connection() as conn: cursor = conn.cursor() conn.row_factory = sqlite3.Row - - if service == 'spotify': + + if service == "spotify": # Prepare data for DB update db_update_data = { - 'region': updated_fields.get('region', existing_cred['region']), - 'updated_at': current_time, - 'name': name # for WHERE clause + "region": updated_fields.get("region", existing_cred["region"]), + "updated_at": current_time, + "name": name, # for WHERE clause } - - blob_path = Path(existing_cred['blob_file_path']) # Use path from existing + + blob_path = Path(existing_cred["blob_file_path"]) # Use path from existing original_blob_content = None if blob_path.exists(): - with open(blob_path, 'r') as f_orig_blob: + with open(blob_path, "r") as f_orig_blob: original_blob_content = f_orig_blob.read() # If blob_content is being updated, write it temporarily for validation - if 'blob_content' in updated_fields: + if "blob_content" in updated_fields: blob_path.parent.mkdir(parents=True, exist_ok=True) - with open(blob_path, 'w') as f_new_blob: - if isinstance(updated_fields['blob_content'], dict): - json.dump(updated_fields['blob_content'], f_new_blob, indent=4) + with open(blob_path, "w") as f_new_blob: + if isinstance(updated_fields["blob_content"], dict): + json.dump(updated_fields["blob_content"], f_new_blob, indent=4) else: - f_new_blob.write(updated_fields['blob_content']) - - validation_data = {'blob_file_path': str(blob_path)} + f_new_blob.write(updated_fields["blob_content"]) + + validation_data = {"blob_file_path": str(blob_path)} try: - _validate_with_retry('spotify', name, validation_data) - - set_clause = ", ".join([f"{key} = ?" for key in db_update_data if key != 'name']) - values = [db_update_data[key] for key in db_update_data if key != 'name'] + [name] - cursor.execute(f"UPDATE spotify SET {set_clause} WHERE name = ?", tuple(values)) + _validate_with_retry("spotify", name, validation_data) + + set_clause = ", ".join( + [f"{key} = ?" for key in db_update_data if key != "name"] + ) + values = [ + db_update_data[key] for key in db_update_data if key != "name" + ] + [name] + cursor.execute( + f"UPDATE spotify SET {set_clause} WHERE name = ?", tuple(values) + ) # If validation passed and blob was in new_data, it's already written. # If blob_content was NOT in new_data, the existing blob (if any) remains. 
- except Exception as e: + except Exception: # Revert blob if it was changed and validation failed - if 'blob_content' in updated_fields and original_blob_content is not None: - with open(blob_path, 'w') as f_revert_blob: + if ( + "blob_content" in updated_fields + and original_blob_content is not None + ): + with open(blob_path, "w") as f_revert_blob: f_revert_blob.write(original_blob_content) - elif 'blob_content' in updated_fields and original_blob_content is None and blob_path.exists(): + elif ( + "blob_content" in updated_fields + and original_blob_content is None + and blob_path.exists() + ): # If new blob was written but there was no original to revert to, delete the new one. blob_path.unlink() - raise # Re-raise validation or DB error + raise # Re-raise validation or DB error - elif service == 'deezer': + elif service == "deezer": db_update_data = { - 'arl': updated_fields.get('arl', existing_cred['arl']), - 'region': updated_fields.get('region', existing_cred['region']), - 'updated_at': current_time, - 'name': name # for WHERE clause + "arl": updated_fields.get("arl", existing_cred["arl"]), + "region": updated_fields.get("region", existing_cred["region"]), + "updated_at": current_time, + "name": name, # for WHERE clause } - - validation_data = {'arl': db_update_data['arl']} - _validate_with_retry('deezer', name, validation_data) # Validation happens before DB write for Deezer - set_clause = ", ".join([f"{key} = ?" for key in db_update_data if key != 'name']) - values = [db_update_data[key] for key in db_update_data if key != 'name'] + [name] - cursor.execute(f"UPDATE deezer SET {set_clause} WHERE name = ?", tuple(values)) + validation_data = {"arl": db_update_data["arl"]} + _validate_with_retry( + "deezer", name, validation_data + ) # Validation happens before DB write for Deezer + + set_clause = ", ".join( + [f"{key} = ?" for key in db_update_data if key != "name"] + ) + values = [ + db_update_data[key] for key in db_update_data if key != "name" + ] + [name] + cursor.execute( + f"UPDATE deezer SET {set_clause} WHERE name = ?", tuple(values) + ) + + if cursor.rowcount == 0: # Should not happen if get_credential succeeded + raise FileNotFoundError( + f"Credential '{name}' for {service} disappeared during edit." + ) - if cursor.rowcount == 0: # Should not happen if get_credential succeeded - raise FileNotFoundError(f"Credential '{name}' for {service} disappeared during edit.") - conn.commit() logger.info(f"Credential '{name}' for {service} updated successfully.") return {"status": "updated", "service": service, "name": name} + # --- Helper for credential file path (mainly for Spotify blob) --- def get_spotify_blob_path(account_name: str) -> Path: - return BLOBS_DIR / account_name / 'credentials.json' + return BLOBS_DIR / account_name / "credentials.json" + # It's good practice to call init_credentials_db() when the app starts. # This can be done in the main application setup. For now, defining it here. 
# If this script is run directly for setup, you could add: # if __name__ == '__main__': # init_credentials_db() -# print("Credentials database initialized.") \ No newline at end of file +# print("Credentials database initialized.") diff --git a/routes/utils/get_info.py b/routes/utils/get_info.py index 1f44a56..d0558c6 100644 --- a/routes/utils/get_info.py +++ b/routes/utils/get_info.py @@ -1,8 +1,4 @@ -#!/usr/bin/python3 - from deezspot.easy_spoty import Spo -import json -from pathlib import Path from routes.utils.celery_queue_manager import get_config_params from routes.utils.credentials import get_credential, _get_global_spotify_api_creds @@ -13,37 +9,42 @@ import logging # Initialize logger logger = logging.getLogger(__name__) -# We'll rely on get_config_params() instead of directly loading the config file def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None): """ Get info from Spotify API. Uses global client_id/secret from search.json. The default Spotify account from main.json might still be relevant for other Spo settings or if Spo uses it. - + Args: spotify_id: The Spotify ID of the entity spotify_type: The type of entity (track, album, playlist, artist, artist_discography, episode) limit (int, optional): The maximum number of items to return. Only used if spotify_type is "artist_discography". offset (int, optional): The index of the first item to return. Only used if spotify_type is "artist_discography". - + Returns: Dictionary with the entity information """ client_id, client_secret = _get_global_spotify_api_creds() - + if not client_id or not client_secret: - raise ValueError("Global Spotify API client_id or client_secret not configured in ./data/creds/search.json.") + raise ValueError( + "Global Spotify API client_id or client_secret not configured in ./data/creds/search.json." + ) # Get config parameters including default Spotify account name # This might still be useful if Spo uses the account name for other things (e.g. market/region if not passed explicitly) # For now, we are just ensuring the API keys are set. config_params = get_config_params() - main_spotify_account_name = config_params.get('spotify', '') # Still good to know which account is 'default' contextually - + main_spotify_account_name = config_params.get( + "spotify", "" + ) # Still good to know which account is 'default' contextually + if not main_spotify_account_name: # This is less critical now that API keys are global, but could indicate a misconfiguration # if other parts of Spo expect an account context. - print(f"WARN: No default Spotify account name configured in settings (main.json). API calls will use global keys.") + print( + "WARN: No default Spotify account name configured in settings (main.json). API calls will use global keys." + ) else: # Optionally, one could load the specific account's region here if Spo.init or methods need it, # but easy_spoty's Spo doesn't seem to take region directly in __init__. @@ -51,16 +52,20 @@ def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None): try: # We call get_credential just to check if the account exists, # not for client_id/secret anymore for Spo.__init__ - get_credential('spotify', main_spotify_account_name) + get_credential("spotify", main_spotify_account_name) except FileNotFoundError: # This is a more serious warning if an account is expected to exist. 
- print(f"WARN: Default Spotify account '{main_spotify_account_name}' configured in main.json was not found in credentials database.") + print( + f"WARN: Default Spotify account '{main_spotify_account_name}' configured in main.json was not found in credentials database." + ) except Exception as e: - print(f"WARN: Error accessing default Spotify account '{main_spotify_account_name}': {e}") + print( + f"WARN: Error accessing default Spotify account '{main_spotify_account_name}': {e}" + ) # Initialize the Spotify client with GLOBAL credentials Spo.__init__(client_id, client_secret) - + if spotify_type == "track": return Spo.get_track(spotify_id) elif spotify_type == "album": @@ -83,27 +88,30 @@ def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None): else: raise ValueError(f"Unsupported Spotify type: {spotify_type}") + def get_deezer_info(deezer_id, deezer_type, limit=None): """ Get info from Deezer API. - + Args: deezer_id: The Deezer ID of the entity. - deezer_type: The type of entity (track, album, playlist, artist, episode, - artist_top_tracks, artist_albums, artist_related, + deezer_type: The type of entity (track, album, playlist, artist, episode, + artist_top_tracks, artist_albums, artist_related, artist_radio, artist_playlists). - limit (int, optional): The maximum number of items to return. Used for + limit (int, optional): The maximum number of items to return. Used for artist_top_tracks, artist_albums, artist_playlists. - Deezer API methods usually have their own defaults (e.g., 25) + Deezer API methods usually have their own defaults (e.g., 25) if limit is not provided or None is passed to them. - + Returns: Dictionary with the entity information. Raises: ValueError: If deezer_type is unsupported. Various exceptions from DeezerAPI (NoDataApi, QuotaExceeded, requests.exceptions.RequestException, etc.) """ - logger.debug(f"Fetching Deezer info for ID {deezer_id}, type {deezer_type}, limit {limit}") + logger.debug( + f"Fetching Deezer info for ID {deezer_id}, type {deezer_type}, limit {limit}" + ) # DeezerAPI uses class methods; its @classmethod __init__ handles setup. # No specific ARL or account handling here as DeezerAPI seems to use general endpoints. 
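
The limit handling in the next hunk follows one pattern throughout: forward `limit` only when the caller supplied it, otherwise let the Deezer endpoint apply its own default (e.g. 25). A hedged sketch of that dispatch, where `api_method` stands in for calls like `DeezerAPI.get_artist_top_tracks`:

def call_with_optional_limit(api_method, entity_id, limit=None):
    # Only forward limit when explicitly given, so the API default applies otherwise.
    if limit is not None:
        return api_method(entity_id, limit=limit)
    return api_method(entity_id)
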
@@ -121,11 +129,11 @@ def get_deezer_info(deezer_id, deezer_type, limit=None): elif deezer_type == "artist_top_tracks": if limit is not None: return DeezerAPI.get_artist_top_tracks(deezer_id, limit=limit) - return DeezerAPI.get_artist_top_tracks(deezer_id) # Use API default limit - elif deezer_type == "artist_albums": # Maps to get_artist_top_albums + return DeezerAPI.get_artist_top_tracks(deezer_id) # Use API default limit + elif deezer_type == "artist_albums": # Maps to get_artist_top_albums if limit is not None: return DeezerAPI.get_artist_top_albums(deezer_id, limit=limit) - return DeezerAPI.get_artist_top_albums(deezer_id) # Use API default limit + return DeezerAPI.get_artist_top_albums(deezer_id) # Use API default limit elif deezer_type == "artist_related": return DeezerAPI.get_artist_related(deezer_id) elif deezer_type == "artist_radio": @@ -133,7 +141,7 @@ def get_deezer_info(deezer_id, deezer_type, limit=None): elif deezer_type == "artist_playlists": if limit is not None: return DeezerAPI.get_artist_top_playlists(deezer_id, limit=limit) - return DeezerAPI.get_artist_top_playlists(deezer_id) # Use API default limit + return DeezerAPI.get_artist_top_playlists(deezer_id) # Use API default limit else: logger.error(f"Unsupported Deezer type: {deezer_type}") raise ValueError(f"Unsupported Deezer type: {deezer_type}") diff --git a/routes/utils/history_manager.py b/routes/utils/history_manager.py index d4d5fb0..2dba42c 100644 --- a/routes/utils/history_manager.py +++ b/routes/utils/history_manager.py @@ -6,29 +6,30 @@ from pathlib import Path logger = logging.getLogger(__name__) -HISTORY_DIR = Path('./data/history') -HISTORY_DB_FILE = HISTORY_DIR / 'download_history.db' +HISTORY_DIR = Path("./data/history") +HISTORY_DB_FILE = HISTORY_DIR / "download_history.db" EXPECTED_COLUMNS = { - 'task_id': 'TEXT PRIMARY KEY', - 'download_type': 'TEXT', - 'item_name': 'TEXT', - 'item_artist': 'TEXT', - 'item_album': 'TEXT', - 'item_url': 'TEXT', - 'spotify_id': 'TEXT', - 'status_final': 'TEXT', # 'COMPLETED', 'ERROR', 'CANCELLED' - 'error_message': 'TEXT', - 'timestamp_added': 'REAL', - 'timestamp_completed': 'REAL', - 'original_request_json': 'TEXT', - 'last_status_obj_json': 'TEXT', - 'service_used': 'TEXT', - 'quality_profile': 'TEXT', - 'convert_to': 'TEXT', - 'bitrate': 'TEXT' + "task_id": "TEXT PRIMARY KEY", + "download_type": "TEXT", + "item_name": "TEXT", + "item_artist": "TEXT", + "item_album": "TEXT", + "item_url": "TEXT", + "spotify_id": "TEXT", + "status_final": "TEXT", # 'COMPLETED', 'ERROR', 'CANCELLED' + "error_message": "TEXT", + "timestamp_added": "REAL", + "timestamp_completed": "REAL", + "original_request_json": "TEXT", + "last_status_obj_json": "TEXT", + "service_used": "TEXT", + "quality_profile": "TEXT", + "convert_to": "TEXT", + "bitrate": "TEXT", } + def init_history_db(): """Initializes the download history database, creates the table if it doesn't exist, and adds any missing columns to an existing table.""" @@ -42,7 +43,7 @@ def init_history_db(): # The primary key constraint is handled by the initial CREATE TABLE. # If 'task_id' is missing, it cannot be added as PRIMARY KEY to an existing table # without complex migrations. We assume 'task_id' will exist if the table exists. 
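
init_history_db below relies on an additive migration strategy: CREATE TABLE IF NOT EXISTS establishes the table with its primary key up front, and any other missing column is backfilled with ALTER TABLE, since SQLite cannot add a PRIMARY KEY column to an existing table. A self-contained sketch of that strategy (table and column names are illustrative):

import sqlite3

def ensure_columns(db_path: str, table: str, expected: dict) -> None:
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        # The PRIMARY KEY column must be part of the initial CREATE TABLE;
        # SQLite cannot add it later with ALTER TABLE.
        cursor.execute(f"CREATE TABLE IF NOT EXISTS {table} (task_id TEXT PRIMARY KEY)")
        cursor.execute(f"PRAGMA table_info({table})")
        existing = {row[1] for row in cursor.fetchall()}  # row[1] is the column name
        for name, col_type in expected.items():
            if name in existing or "PRIMARY KEY" in col_type.upper():
                continue
            cursor.execute(f"ALTER TABLE {table} ADD COLUMN {name} {col_type}")
        conn.commit()
    finally:
        conn.close()
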
- create_table_sql = f""" + create_table_sql = """ CREATE TABLE IF NOT EXISTS download_history ( task_id TEXT PRIMARY KEY, download_type TEXT, @@ -74,42 +75,54 @@ def init_history_db(): added_columns = False for col_name, col_type in EXPECTED_COLUMNS.items(): if col_name not in existing_column_names: - if 'PRIMARY KEY' in col_type.upper() and col_name == 'task_id': + if "PRIMARY KEY" in col_type.upper() and col_name == "task_id": # This case should be handled by CREATE TABLE, but as a safeguard: # If task_id is somehow missing and table exists, this is a problem. # Adding it as PK here is complex and might fail if data exists. # For now, we assume CREATE TABLE handles the PK. # If we were to add it, it would be 'ALTER TABLE download_history ADD COLUMN task_id TEXT;' # and then potentially a separate step to make it PK if table is empty, which is non-trivial. - logger.warning(f"Column '{col_name}' is part of PRIMARY KEY and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN.") + logger.warning( + f"Column '{col_name}' is part of PRIMARY KEY and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN." + ) continue # For other columns, just add them. # Remove PRIMARY KEY from type definition if present, as it's only for table creation. - col_type_for_add = col_type.replace(' PRIMARY KEY', '').strip() + col_type_for_add = col_type.replace(" PRIMARY KEY", "").strip() try: - cursor.execute(f"ALTER TABLE download_history ADD COLUMN {col_name} {col_type_for_add}") - logger.info(f"Added missing column '{col_name} {col_type_for_add}' to download_history table.") + cursor.execute( + f"ALTER TABLE download_history ADD COLUMN {col_name} {col_type_for_add}" + ) + logger.info( + f"Added missing column '{col_name} {col_type_for_add}' to download_history table." + ) added_columns = True except sqlite3.OperationalError as alter_e: # This might happen if a column (e.g. task_id) without "PRIMARY KEY" is added by this loop # but the initial create table already made it a primary key. # Or other more complex scenarios. - logger.warning(f"Could not add column '{col_name}': {alter_e}. It might already exist or there's a schema mismatch.") - + logger.warning( + f"Could not add column '{col_name}': {alter_e}. It might already exist or there's a schema mismatch." + ) if added_columns: conn.commit() logger.info(f"Download history table schema updated at {HISTORY_DB_FILE}") else: - logger.info(f"Download history database schema is up-to-date at {HISTORY_DB_FILE}") + logger.info( + f"Download history database schema is up-to-date at {HISTORY_DB_FILE}" + ) except sqlite3.Error as e: - logger.error(f"Error initializing download history database: {e}", exc_info=True) + logger.error( + f"Error initializing download history database: {e}", exc_info=True + ) finally: if conn: conn.close() + def add_entry_to_history(history_data: dict): """Adds or replaces an entry in the download_history table. @@ -118,11 +131,23 @@ def add_entry_to_history(history_data: dict): Expected keys match the table columns. 
""" required_keys = [ - 'task_id', 'download_type', 'item_name', 'item_artist', 'item_album', - 'item_url', 'spotify_id', 'status_final', 'error_message', - 'timestamp_added', 'timestamp_completed', 'original_request_json', - 'last_status_obj_json', 'service_used', 'quality_profile', - 'convert_to', 'bitrate' + "task_id", + "download_type", + "item_name", + "item_artist", + "item_album", + "item_url", + "spotify_id", + "status_final", + "error_message", + "timestamp_added", + "timestamp_completed", + "original_request_json", + "last_status_obj_json", + "service_used", + "quality_profile", + "convert_to", + "bitrate", ] # Ensure all keys are present, filling with None if not for key in required_keys: @@ -132,7 +157,8 @@ def add_entry_to_history(history_data: dict): try: conn = sqlite3.connect(HISTORY_DB_FILE) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ INSERT OR REPLACE INTO download_history ( task_id, download_type, item_name, item_artist, item_album, item_url, spotify_id, status_final, error_message, @@ -140,26 +166,49 @@ def add_entry_to_history(history_data: dict): last_status_obj_json, service_used, quality_profile, convert_to, bitrate ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """, ( - history_data['task_id'], history_data['download_type'], history_data['item_name'], - history_data['item_artist'], history_data['item_album'], history_data['item_url'], - history_data['spotify_id'], history_data['status_final'], history_data['error_message'], - history_data['timestamp_added'], history_data['timestamp_completed'], - history_data['original_request_json'], history_data['last_status_obj_json'], - history_data['service_used'], history_data['quality_profile'], - history_data['convert_to'], history_data['bitrate'] - )) + """, + ( + history_data["task_id"], + history_data["download_type"], + history_data["item_name"], + history_data["item_artist"], + history_data["item_album"], + history_data["item_url"], + history_data["spotify_id"], + history_data["status_final"], + history_data["error_message"], + history_data["timestamp_added"], + history_data["timestamp_completed"], + history_data["original_request_json"], + history_data["last_status_obj_json"], + history_data["service_used"], + history_data["quality_profile"], + history_data["convert_to"], + history_data["bitrate"], + ), + ) conn.commit() - logger.info(f"Added/Updated history for task_id: {history_data['task_id']}, status: {history_data['status_final']}") + logger.info( + f"Added/Updated history for task_id: {history_data['task_id']}, status: {history_data['status_final']}" + ) except sqlite3.Error as e: - logger.error(f"Error adding entry to download history for task_id {history_data.get('task_id')}: {e}", exc_info=True) + logger.error( + f"Error adding entry to download history for task_id {history_data.get('task_id')}: {e}", + exc_info=True, + ) except Exception as e: - logger.error(f"Unexpected error adding to history for task_id {history_data.get('task_id')}: {e}", exc_info=True) + logger.error( + f"Unexpected error adding to history for task_id {history_data.get('task_id')}: {e}", + exc_info=True, + ) finally: if conn: conn.close() -def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_order='DESC', filters=None): + +def get_history_entries( + limit=25, offset=0, sort_by="timestamp_completed", sort_order="DESC", filters=None +): """Retrieves entries from the download_history table with pagination, sorting, and filtering. 
Args: @@ -189,10 +238,10 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_ if filters: for column, value in filters.items(): # Basic security: ensure column is a valid one (alphanumeric + underscore) - if column.replace('_', '').isalnum(): + if column.replace("_", "").isalnum(): where_clauses.append(f"{column} = ?") params.append(value) - + if where_clauses: where_sql = " WHERE " + " AND ".join(where_clauses) count_query += where_sql @@ -204,23 +253,33 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_ # Validate sort_by and sort_order to prevent SQL injection valid_sort_columns = [ - 'task_id', 'download_type', 'item_name', 'item_artist', 'item_album', - 'item_url', 'status_final', 'timestamp_added', 'timestamp_completed', - 'service_used', 'quality_profile', 'convert_to', 'bitrate' + "task_id", + "download_type", + "item_name", + "item_artist", + "item_album", + "item_url", + "status_final", + "timestamp_added", + "timestamp_completed", + "service_used", + "quality_profile", + "convert_to", + "bitrate", ] if sort_by not in valid_sort_columns: - sort_by = 'timestamp_completed' # Default sort - + sort_by = "timestamp_completed" # Default sort + sort_order_upper = sort_order.upper() - if sort_order_upper not in ['ASC', 'DESC']: - sort_order_upper = 'DESC' + if sort_order_upper not in ["ASC", "DESC"]: + sort_order_upper = "DESC" select_query += f" ORDER BY {sort_by} {sort_order_upper} LIMIT ? OFFSET ?" params.extend([limit, offset]) cursor.execute(select_query, params) rows = cursor.fetchall() - + # Convert rows to list of dicts entries = [dict(row) for row in rows] return entries, total_count @@ -232,72 +291,79 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_ if conn: conn.close() -if __name__ == '__main__': + +if __name__ == "__main__": # For testing purposes logging.basicConfig(level=logging.INFO) init_history_db() - + sample_data_complete = { - 'task_id': 'test_task_123', - 'download_type': 'track', - 'item_name': 'Test Song', - 'item_artist': 'Test Artist', - 'item_album': 'Test Album', - 'item_url': 'http://spotify.com/track/123', - 'spotify_id': '123', - 'status_final': 'COMPLETED', - 'error_message': None, - 'timestamp_added': time.time() - 3600, - 'timestamp_completed': time.time(), - 'original_request_json': json.dumps({'param1': 'value1'}), - 'last_status_obj_json': json.dumps({'status': 'complete', 'message': 'Finished!'}), - 'service_used': 'Spotify (Primary)', - 'quality_profile': 'NORMAL', - 'convert_to': None, - 'bitrate': None + "task_id": "test_task_123", + "download_type": "track", + "item_name": "Test Song", + "item_artist": "Test Artist", + "item_album": "Test Album", + "item_url": "http://spotify.com/track/123", + "spotify_id": "123", + "status_final": "COMPLETED", + "error_message": None, + "timestamp_added": time.time() - 3600, + "timestamp_completed": time.time(), + "original_request_json": json.dumps({"param1": "value1"}), + "last_status_obj_json": json.dumps( + {"status": "complete", "message": "Finished!"} + ), + "service_used": "Spotify (Primary)", + "quality_profile": "NORMAL", + "convert_to": None, + "bitrate": None, } add_entry_to_history(sample_data_complete) sample_data_error = { - 'task_id': 'test_task_456', - 'download_type': 'album', - 'item_name': 'Another Album', - 'item_artist': 'Another Artist', - 'item_album': 'Another Album', # For albums, item_name and item_album are often the same - 'item_url': 'http://spotify.com/album/456', - 'spotify_id': 
'456', - 'status_final': 'ERROR', - 'error_message': 'Download failed due to network issue.', - 'timestamp_added': time.time() - 7200, - 'timestamp_completed': time.time() - 60, - 'original_request_json': json.dumps({'param2': 'value2'}), - 'last_status_obj_json': json.dumps({'status': 'error', 'error': 'Network issue'}), - 'service_used': 'Deezer', - 'quality_profile': 'MP3_320', - 'convert_to': 'mp3', - 'bitrate': '320' + "task_id": "test_task_456", + "download_type": "album", + "item_name": "Another Album", + "item_artist": "Another Artist", + "item_album": "Another Album", # For albums, item_name and item_album are often the same + "item_url": "http://spotify.com/album/456", + "spotify_id": "456", + "status_final": "ERROR", + "error_message": "Download failed due to network issue.", + "timestamp_added": time.time() - 7200, + "timestamp_completed": time.time() - 60, + "original_request_json": json.dumps({"param2": "value2"}), + "last_status_obj_json": json.dumps( + {"status": "error", "error": "Network issue"} + ), + "service_used": "Deezer", + "quality_profile": "MP3_320", + "convert_to": "mp3", + "bitrate": "320", } add_entry_to_history(sample_data_error) # Test updating an entry updated_data_complete = { - 'task_id': 'test_task_123', - 'download_type': 'track', - 'item_name': 'Test Song (Updated)', - 'item_artist': 'Test Artist', - 'item_album': 'Test Album II', - 'item_url': 'http://spotify.com/track/123', - 'spotify_id': '123', - 'status_final': 'COMPLETED', - 'error_message': None, - 'timestamp_added': time.time() - 3600, - 'timestamp_completed': time.time() + 100, # Updated completion time - 'original_request_json': json.dumps({'param1': 'value1', 'new_param': 'added'}), - 'last_status_obj_json': json.dumps({'status': 'complete', 'message': 'Finished! With update.'}), - 'service_used': 'Spotify (Deezer Fallback)', - 'quality_profile': 'HIGH', - 'convert_to': 'flac', - 'bitrate': None + "task_id": "test_task_123", + "download_type": "track", + "item_name": "Test Song (Updated)", + "item_artist": "Test Artist", + "item_album": "Test Album II", + "item_url": "http://spotify.com/track/123", + "spotify_id": "123", + "status_final": "COMPLETED", + "error_message": None, + "timestamp_added": time.time() - 3600, + "timestamp_completed": time.time() + 100, # Updated completion time + "original_request_json": json.dumps({"param1": "value1", "new_param": "added"}), + "last_status_obj_json": json.dumps( + {"status": "complete", "message": "Finished! 
With update."} + ), + "service_used": "Spotify (Deezer Fallback)", + "quality_profile": "HIGH", + "convert_to": "flac", + "bitrate": None, } add_entry_to_history(updated_data_complete) @@ -310,13 +376,17 @@ if __name__ == '__main__': print(entry) print("\nFetching history entries (sorted by item_name ASC, limit 2, offset 1):") - entries_sorted, total_sorted = get_history_entries(limit=2, offset=1, sort_by='item_name', sort_order='ASC') + entries_sorted, total_sorted = get_history_entries( + limit=2, offset=1, sort_by="item_name", sort_order="ASC" + ) print(f"Total entries (should be same as above): {total_sorted}") for entry in entries_sorted: print(entry) - + print("\nFetching history entries with filter (status_final = COMPLETED):") - entries_filtered, total_filtered = get_history_entries(filters={'status_final': 'COMPLETED'}) + entries_filtered, total_filtered = get_history_entries( + filters={"status_final": "COMPLETED"} + ) print(f"Total COMPLETED entries: {total_filtered}") for entry in entries_filtered: - print(entry) \ No newline at end of file + print(entry) diff --git a/routes/utils/playlist.py b/routes/utils/playlist.py index 275f696..3266e17 100755 --- a/routes/utils/playlist.py +++ b/routes/utils/playlist.py @@ -1,11 +1,9 @@ -import os -import json import traceback from deezspot.spotloader import SpoLogin from deezspot.deezloader import DeeLogin from pathlib import Path from routes.utils.credentials import get_credential, _get_global_spotify_api_creds -from routes.utils.celery_config import get_config_params + def download_playlist( url, @@ -23,56 +21,68 @@ def download_playlist( max_retries=3, progress_callback=None, convert_to=None, - bitrate=None + bitrate=None, ): try: # Detect URL source (Spotify or Deezer) from URL - is_spotify_url = 'open.spotify.com' in url.lower() - is_deezer_url = 'deezer.com' in url.lower() - - service = '' + is_spotify_url = "open.spotify.com" in url.lower() + is_deezer_url = "deezer.com" in url.lower() + + service = "" if is_spotify_url: - service = 'spotify' + service = "spotify" elif is_deezer_url: - service = 'deezer' + service = "deezer" else: error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com" print(f"ERROR: {error_msg}") raise ValueError(error_msg) - + print(f"DEBUG: playlist.py - Service determined from URL: {service}") - print(f"DEBUG: playlist.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'") + print( + f"DEBUG: playlist.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'" + ) # Get global Spotify API credentials - global_spotify_client_id, global_spotify_client_secret = _get_global_spotify_api_creds() + global_spotify_client_id, global_spotify_client_secret = ( + _get_global_spotify_api_creds() + ) if not global_spotify_client_id or not global_spotify_client_secret: warning_msg = "WARN: playlist.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail." 
print(warning_msg) - if service == 'spotify': - if fallback: # Fallback is a Deezer account name for a Spotify URL - if quality is None: quality = 'FLAC' # Deezer quality for first attempt - if fall_quality is None: fall_quality = 'HIGH' # Spotify quality for fallback (if Deezer fails) - + if service == "spotify": + if fallback: # Fallback is a Deezer account name for a Spotify URL + if quality is None: + quality = "FLAC" # Deezer quality for first attempt + if fall_quality is None: + fall_quality = ( + "HIGH" # Spotify quality for fallback (if Deezer fails) + ) + deezer_error = None try: # Attempt 1: Deezer via download_playlistspo (using 'fallback' as Deezer account name) - print(f"DEBUG: playlist.py - Spotify URL. Attempt 1: Deezer (account: {fallback})") - deezer_fallback_creds = get_credential('deezer', fallback) - arl = deezer_fallback_creds.get('arl') + print( + f"DEBUG: playlist.py - Spotify URL. Attempt 1: Deezer (account: {fallback})" + ) + deezer_fallback_creds = get_credential("deezer", fallback) + arl = deezer_fallback_creds.get("arl") if not arl: - raise ValueError(f"ARL not found for Deezer account '{fallback}'.") - + raise ValueError( + f"ARL not found for Deezer account '{fallback}'." + ) + dl = DeeLogin( arl=arl, spotify_client_id=global_spotify_client_id, spotify_client_secret=global_spotify_client_secret, - progress_callback=progress_callback + progress_callback=progress_callback, ) dl.download_playlistspo( - link_playlist=url, # Spotify URL + link_playlist=url, # Spotify URL output_dir="./downloads", - quality_download=quality, # Deezer quality + quality_download=quality, # Deezer quality recursive_quality=True, recursive_download=False, not_interface=False, @@ -85,35 +95,50 @@ def download_playlist( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, + ) + print( + f"DEBUG: playlist.py - Playlist download via Deezer (account: {fallback}) successful for Spotify URL." ) - print(f"DEBUG: playlist.py - Playlist download via Deezer (account: {fallback}) successful for Spotify URL.") except Exception as e: deezer_error = e - print(f"ERROR: playlist.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}") + print( + f"ERROR: playlist.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}" + ) traceback.print_exc() - print(f"DEBUG: playlist.py - Attempting Spotify direct download (account: {main} for blob)...") - + print( + f"DEBUG: playlist.py - Attempting Spotify direct download (account: {main} for blob)..." + ) + # Attempt 2: Spotify direct via download_playlist (using 'main' as Spotify account for blob) try: - if not global_spotify_client_id or not global_spotify_client_secret: - raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.") - - spotify_main_creds = get_credential('spotify', main) # For blob path - blob_file_path = spotify_main_creds.get('blob_file_path') + if ( + not global_spotify_client_id + or not global_spotify_client_secret + ): + raise ValueError( + "Global Spotify API credentials (client_id/secret) not configured for Spotify download." 
+ ) + + spotify_main_creds = get_credential( + "spotify", main + ) # For blob path + blob_file_path = spotify_main_creds.get("blob_file_path") if not Path(blob_file_path).exists(): - raise FileNotFoundError(f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'") + raise FileNotFoundError( + f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'" + ) spo = SpoLogin( credentials_path=blob_file_path, spotify_client_id=global_spotify_client_id, spotify_client_secret=global_spotify_client_secret, - progress_callback=progress_callback + progress_callback=progress_callback, ) spo.download_playlist( - link_playlist=url, # Spotify URL + link_playlist=url, # Spotify URL output_dir="./downloads", - quality_download=fall_quality, # Spotify quality + quality_download=fall_quality, # Spotify quality recursive_quality=True, recursive_download=False, not_interface=False, @@ -127,38 +152,49 @@ def download_playlist( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, + ) + print( + f"DEBUG: playlist.py - Spotify direct download (account: {main} for blob) successful." ) - print(f"DEBUG: playlist.py - Spotify direct download (account: {main} for blob) successful.") except Exception as e2: - print(f"ERROR: playlist.py - Spotify direct download (account: {main} for blob) also failed: {e2}") + print( + f"ERROR: playlist.py - Spotify direct download (account: {main} for blob) also failed: {e2}" + ) raise RuntimeError( f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. " f"Deezer error: {deezer_error}, Spotify error: {e2}" ) from e2 else: # Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob) - if quality is None: quality = 'HIGH' # Default Spotify quality - print(f"DEBUG: playlist.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}") - - if not global_spotify_client_id or not global_spotify_client_secret: - raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.") + if quality is None: + quality = "HIGH" # Default Spotify quality + print( + f"DEBUG: playlist.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}" + ) - spotify_main_creds = get_credential('spotify', main) # For blob path - blob_file_path = spotify_main_creds.get('blob_file_path') + if not global_spotify_client_id or not global_spotify_client_secret: + raise ValueError( + "Global Spotify API credentials (client_id/secret) not configured for Spotify download." 
+ ) + + spotify_main_creds = get_credential("spotify", main) # For blob path + blob_file_path = spotify_main_creds.get("blob_file_path") if not Path(blob_file_path).exists(): - raise FileNotFoundError(f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'") + raise FileNotFoundError( + f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'" + ) spo = SpoLogin( credentials_path=blob_file_path, spotify_client_id=global_spotify_client_id, spotify_client_secret=global_spotify_client_secret, - progress_callback=progress_callback + progress_callback=progress_callback, ) spo.download_playlist( link_playlist=url, output_dir="./downloads", - quality_download=quality, + quality_download=quality, recursive_quality=True, recursive_download=False, not_interface=False, @@ -172,30 +208,35 @@ def download_playlist( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, ) - print(f"DEBUG: playlist.py - Direct Spotify download (account: {main} for blob) successful.") - - elif service == 'deezer': + print( + f"DEBUG: playlist.py - Direct Spotify download (account: {main} for blob) successful." + ) + + elif service == "deezer": # Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL) - if quality is None: quality = 'FLAC' # Default Deezer quality - print(f"DEBUG: playlist.py - Deezer URL. Direct download with Deezer account: {main}") - deezer_main_creds = get_credential('deezer', main) # For ARL - arl = deezer_main_creds.get('arl') + if quality is None: + quality = "FLAC" # Default Deezer quality + print( + f"DEBUG: playlist.py - Deezer URL. Direct download with Deezer account: {main}" + ) + deezer_main_creds = get_credential("deezer", main) # For ARL + arl = deezer_main_creds.get("arl") if not arl: raise ValueError(f"ARL not found for Deezer account '{main}'.") dl = DeeLogin( - arl=arl, # Account specific ARL - spotify_client_id=global_spotify_client_id, # Global Spotify keys - spotify_client_secret=global_spotify_client_secret, # Global Spotify keys - progress_callback=progress_callback + arl=arl, # Account specific ARL + spotify_client_id=global_spotify_client_id, # Global Spotify keys + spotify_client_secret=global_spotify_client_secret, # Global Spotify keys + progress_callback=progress_callback, ) - dl.download_playlistdee( # Deezer URL, download via Deezer + dl.download_playlistdee( # Deezer URL, download via Deezer link_playlist=url, output_dir="./downloads", quality_download=quality, - recursive_quality=False, # Usually False for playlists to get individual track qualities + recursive_quality=False, # Usually False for playlists to get individual track qualities recursive_download=False, make_zip=False, custom_dir_format=custom_dir_format, @@ -206,9 +247,11 @@ def download_playlist( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, + ) + print( + f"DEBUG: playlist.py - Direct Deezer download (account: {main}) successful." 
) - print(f"DEBUG: playlist.py - Direct Deezer download (account: {main}) successful.") else: # Should be caught by initial service check, but as a safeguard raise ValueError(f"Unsupported service determined: {service}") diff --git a/routes/utils/search.py b/routes/utils/search.py index 12e8dea..b96bb28 100755 --- a/routes/utils/search.py +++ b/routes/utils/search.py @@ -1,50 +1,58 @@ from deezspot.easy_spoty import Spo -import json -from pathlib import Path import logging from routes.utils.credentials import get_credential, _get_global_spotify_api_creds # Configure logger logger = logging.getLogger(__name__) -def search( - query: str, - search_type: str, - limit: int = 3, - main: str = None -) -> dict: - logger.info(f"Search requested: query='{query}', type={search_type}, limit={limit}, main_account_name={main}") - + +def search(query: str, search_type: str, limit: int = 3, main: str = None) -> dict: + logger.info( + f"Search requested: query='{query}', type={search_type}, limit={limit}, main_account_name={main}" + ) + client_id, client_secret = _get_global_spotify_api_creds() - + if not client_id or not client_secret: - logger.error("Global Spotify API client_id or client_secret not configured in ./data/creds/search.json.") - raise ValueError("Spotify API credentials are not configured globally for search.") + logger.error( + "Global Spotify API client_id or client_secret not configured in ./data/creds/search.json." + ) + raise ValueError( + "Spotify API credentials are not configured globally for search." + ) if main: - logger.debug(f"Spotify account context '{main}' was provided for search. API keys are global, but this account might be used for other context by Spo if relevant.") + logger.debug( + f"Spotify account context '{main}' was provided for search. API keys are global, but this account might be used for other context by Spo if relevant." + ) try: - get_credential('spotify', main) + get_credential("spotify", main) logger.debug(f"Spotify account '{main}' exists.") except FileNotFoundError: - logger.warning(f"Spotify account '{main}' provided for search context not found in credentials. Search will proceed with global API keys.") + logger.warning( + f"Spotify account '{main}' provided for search context not found in credentials. Search will proceed with global API keys." + ) except Exception as e: - logger.warning(f"Error checking existence of Spotify account '{main}': {e}. Search will proceed with global API keys.") + logger.warning( + f"Error checking existence of Spotify account '{main}': {e}. Search will proceed with global API keys." + ) else: - logger.debug("No specific 'main' account context provided for search. Using global API keys.") - - logger.debug(f"Initializing Spotify client with global API credentials for search.") + logger.debug( + "No specific 'main' account context provided for search. Using global API keys." 
+ ) + + logger.debug("Initializing Spotify client with global API credentials for search.") Spo.__init__(client_id, client_secret) - logger.debug(f"Executing Spotify search with query='{query}', type={search_type}, limit={limit}") + logger.debug( + f"Executing Spotify search with query='{query}', type={search_type}, limit={limit}" + ) try: - spotify_response = Spo.search( - query=query, - search_type=search_type, - limit=limit - ) + spotify_response = Spo.search(query=query, search_type=search_type, limit=limit) logger.info(f"Search completed successfully for query: '{query}'") return spotify_response except Exception as e: - logger.error(f"Error during Spotify search for query '{query}': {e}", exc_info=True) + logger.error( + f"Error during Spotify search for query '{query}': {e}", exc_info=True + ) raise diff --git a/routes/utils/track.py b/routes/utils/track.py index 7dd43cc..79c40bc 100755 --- a/routes/utils/track.py +++ b/routes/utils/track.py @@ -1,11 +1,12 @@ -import os -import json import traceback from deezspot.spotloader import SpoLogin from deezspot.deezloader import DeeLogin -from pathlib import Path -from routes.utils.credentials import get_credential, _get_global_spotify_api_creds, get_spotify_blob_path -from routes.utils.celery_config import get_config_params +from routes.utils.credentials import ( + get_credential, + _get_global_spotify_api_creds, + get_spotify_blob_path, +) + def download_track( url, @@ -23,28 +24,32 @@ def download_track( max_retries=3, progress_callback=None, convert_to=None, - bitrate=None + bitrate=None, ): try: # Detect URL source (Spotify or Deezer) from URL - is_spotify_url = 'open.spotify.com' in url.lower() - is_deezer_url = 'deezer.com' in url.lower() - - service = '' + is_spotify_url = "open.spotify.com" in url.lower() + is_deezer_url = "deezer.com" in url.lower() + + service = "" if is_spotify_url: - service = 'spotify' + service = "spotify" elif is_deezer_url: - service = 'deezer' + service = "deezer" else: error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com" print(f"ERROR: {error_msg}") raise ValueError(error_msg) - + print(f"DEBUG: track.py - Service determined from URL: {service}") - print(f"DEBUG: track.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'") + print( + f"DEBUG: track.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'" + ) # Get global Spotify API credentials for SpoLogin and DeeLogin (if it uses Spotify search) - global_spotify_client_id, global_spotify_client_secret = _get_global_spotify_api_creds() + global_spotify_client_id, global_spotify_client_secret = ( + _get_global_spotify_api_creds() + ) if not global_spotify_client_id or not global_spotify_client_secret: # This is a critical failure if Spotify operations are involved warning_msg = "WARN: track.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail." @@ -52,31 +57,39 @@ def download_track( # Depending on flow, might want to raise error here if service is 'spotify' # For now, let it proceed and fail at SpoLogin/DeeLogin init if keys are truly needed and missing. 
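
The branch that follows implements a two-attempt chain for Spotify URLs: try Deezer with the fallback account first, and only on failure fall back to a direct Spotify download, reporting both errors if everything fails. A reduced sketch of that control flow (`try_deezer` and `try_spotify` are placeholder callables, not functions from this codebase):

def download_with_fallback(url: str, try_deezer, try_spotify):
    try:
        return try_deezer(url)
    except Exception as deezer_error:
        try:
            return try_spotify(url)
        except Exception as spotify_error:
            # Surface both failures, chained to the final Spotify error.
            raise RuntimeError(
                f"Both Deezer and Spotify attempts failed. "
                f"Deezer error: {deezer_error}, Spotify error: {spotify_error}"
            ) from spotify_error

Chaining with `from spotify_error` keeps the original traceback attached, which is why the patch raises its RuntimeError the same way.
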
- if service == 'spotify': - if fallback: # Fallback is a Deezer account name for a Spotify URL - if quality is None: quality = 'FLAC' # Deezer quality for first attempt - if fall_quality is None: fall_quality = 'HIGH' # Spotify quality for fallback (if Deezer fails) - + if service == "spotify": + if fallback: # Fallback is a Deezer account name for a Spotify URL + if quality is None: + quality = "FLAC" # Deezer quality for first attempt + if fall_quality is None: + fall_quality = ( + "HIGH" # Spotify quality for fallback (if Deezer fails) + ) + deezer_error = None try: # Attempt 1: Deezer via download_trackspo (using 'fallback' as Deezer account name) - print(f"DEBUG: track.py - Spotify URL. Attempt 1: Deezer (account: {fallback})") - deezer_fallback_creds = get_credential('deezer', fallback) - arl = deezer_fallback_creds.get('arl') + print( + f"DEBUG: track.py - Spotify URL. Attempt 1: Deezer (account: {fallback})" + ) + deezer_fallback_creds = get_credential("deezer", fallback) + arl = deezer_fallback_creds.get("arl") if not arl: - raise ValueError(f"ARL not found for Deezer account '{fallback}'.") - + raise ValueError( + f"ARL not found for Deezer account '{fallback}'." + ) + dl = DeeLogin( arl=arl, - spotify_client_id=global_spotify_client_id, # Global creds - spotify_client_secret=global_spotify_client_secret, # Global creds - progress_callback=progress_callback + spotify_client_id=global_spotify_client_id, # Global creds + spotify_client_secret=global_spotify_client_secret, # Global creds + progress_callback=progress_callback, ) # download_trackspo means: Spotify URL, download via Deezer dl.download_trackspo( - link_track=url, # Spotify URL + link_track=url, # Spotify URL output_dir="./downloads", - quality_download=quality, # Deezer quality + quality_download=quality, # Deezer quality recursive_quality=False, recursive_download=False, not_interface=False, @@ -87,35 +100,52 @@ def download_track( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, + ) + print( + f"DEBUG: track.py - Track download via Deezer (account: {fallback}) successful for Spotify URL." ) - print(f"DEBUG: track.py - Track download via Deezer (account: {fallback}) successful for Spotify URL.") except Exception as e: deezer_error = e - print(f"ERROR: track.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}") + print( + f"ERROR: track.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}" + ) traceback.print_exc() - print(f"DEBUG: track.py - Attempting Spotify direct download (account: {main})...") - + print( + f"DEBUG: track.py - Attempting Spotify direct download (account: {main})..." + ) + # Attempt 2: Spotify direct via download_track (using 'main' as Spotify account for blob) try: - if not global_spotify_client_id or not global_spotify_client_secret: - raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.") - + if ( + not global_spotify_client_id + or not global_spotify_client_secret + ): + raise ValueError( + "Global Spotify API credentials (client_id/secret) not configured for Spotify download." 
+ ) + # Use get_spotify_blob_path directly - blob_file_path = get_spotify_blob_path(main) - if not blob_file_path.exists(): # Check existence on the Path object - raise FileNotFoundError(f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'") + blob_file_path = get_spotify_blob_path(main) + if ( + not blob_file_path.exists() + ): # Check existence on the Path object + raise FileNotFoundError( + f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'" + ) spo = SpoLogin( - credentials_path=str(blob_file_path), # Account specific blob - spotify_client_id=global_spotify_client_id, # Global API keys - spotify_client_secret=global_spotify_client_secret, # Global API keys - progress_callback=progress_callback + credentials_path=str( + blob_file_path + ), # Account specific blob + spotify_client_id=global_spotify_client_id, # Global API keys + spotify_client_secret=global_spotify_client_secret, # Global API keys + progress_callback=progress_callback, ) spo.download_track( - link_track=url, # Spotify URL + link_track=url, # Spotify URL output_dir="./downloads", - quality_download=fall_quality, # Spotify quality + quality_download=fall_quality, # Spotify quality recursive_quality=False, recursive_download=False, not_interface=False, @@ -128,38 +158,49 @@ def download_track( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, + ) + print( + f"DEBUG: track.py - Spotify direct download (account: {main} for blob) successful." ) - print(f"DEBUG: track.py - Spotify direct download (account: {main} for blob) successful.") except Exception as e2: - print(f"ERROR: track.py - Spotify direct download (account: {main} for blob) also failed: {e2}") + print( + f"ERROR: track.py - Spotify direct download (account: {main} for blob) also failed: {e2}" + ) raise RuntimeError( f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. " f"Deezer error: {deezer_error}, Spotify error: {e2}" ) from e2 else: # Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob) - if quality is None: quality = 'HIGH' # Default Spotify quality - print(f"DEBUG: track.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}") - + if quality is None: + quality = "HIGH" # Default Spotify quality + print( + f"DEBUG: track.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}" + ) + if not global_spotify_client_id or not global_spotify_client_secret: - raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.") + raise ValueError( + "Global Spotify API credentials (client_id/secret) not configured for Spotify download." 
+ ) # Use get_spotify_blob_path directly blob_file_path = get_spotify_blob_path(main) - if not blob_file_path.exists(): # Check existence on the Path object - raise FileNotFoundError(f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'") + if not blob_file_path.exists(): # Check existence on the Path object + raise FileNotFoundError( + f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'" + ) spo = SpoLogin( - credentials_path=str(blob_file_path), # Account specific blob - spotify_client_id=global_spotify_client_id, # Global API keys - spotify_client_secret=global_spotify_client_secret, # Global API keys - progress_callback=progress_callback + credentials_path=str(blob_file_path), # Account specific blob + spotify_client_id=global_spotify_client_id, # Global API keys + spotify_client_secret=global_spotify_client_secret, # Global API keys + progress_callback=progress_callback, ) spo.download_track( link_track=url, output_dir="./downloads", - quality_download=quality, + quality_download=quality, recursive_quality=False, recursive_download=False, not_interface=False, @@ -172,26 +213,31 @@ def download_track( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, ) - print(f"DEBUG: track.py - Direct Spotify download (account: {main} for blob) successful.") - - elif service == 'deezer': + print( + f"DEBUG: track.py - Direct Spotify download (account: {main} for blob) successful." + ) + + elif service == "deezer": # Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL) - if quality is None: quality = 'FLAC' # Default Deezer quality - print(f"DEBUG: track.py - Deezer URL. Direct download with Deezer account: {main}") - deezer_main_creds = get_credential('deezer', main) # For ARL - arl = deezer_main_creds.get('arl') + if quality is None: + quality = "FLAC" # Default Deezer quality + print( + f"DEBUG: track.py - Deezer URL. Direct download with Deezer account: {main}" + ) + deezer_main_creds = get_credential("deezer", main) # For ARL + arl = deezer_main_creds.get("arl") if not arl: raise ValueError(f"ARL not found for Deezer account '{main}'.") dl = DeeLogin( - arl=arl, # Account specific ARL - spotify_client_id=global_spotify_client_id, # Global Spotify keys for internal Spo use by DeeLogin - spotify_client_secret=global_spotify_client_secret, # Global Spotify keys - progress_callback=progress_callback + arl=arl, # Account specific ARL + spotify_client_id=global_spotify_client_id, # Global Spotify keys for internal Spo use by DeeLogin + spotify_client_secret=global_spotify_client_secret, # Global Spotify keys + progress_callback=progress_callback, ) - dl.download_trackdee( # Deezer URL, download via Deezer + dl.download_trackdee( # Deezer URL, download via Deezer link_track=url, output_dir="./downloads", quality_download=quality, @@ -205,12 +251,14 @@ def download_track( retry_delay_increase=retry_delay_increase, max_retries=max_retries, convert_to=convert_to, - bitrate=bitrate + bitrate=bitrate, + ) + print( + f"DEBUG: track.py - Direct Deezer download (account: {main}) successful." 
) - print(f"DEBUG: track.py - Direct Deezer download (account: {main}) successful.") else: # Should be caught by initial service check, but as a safeguard raise ValueError(f"Unsupported service determined: {service}") - except Exception as e: + except Exception: traceback.print_exc() raise diff --git a/routes/utils/watch/db.py b/routes/utils/watch/db.py index 76827b5..f6e0e67 100644 --- a/routes/utils/watch/db.py +++ b/routes/utils/watch/db.py @@ -1,80 +1,85 @@ import sqlite3 -import json from pathlib import Path import logging import time logger = logging.getLogger(__name__) -DB_DIR = Path('./data/watch') +DB_DIR = Path("./data/watch") # Define separate DB paths -PLAYLISTS_DB_PATH = DB_DIR / 'playlists.db' -ARTISTS_DB_PATH = DB_DIR / 'artists.db' +PLAYLISTS_DB_PATH = DB_DIR / "playlists.db" +ARTISTS_DB_PATH = DB_DIR / "artists.db" # Config path for watch.json is managed in routes.utils.watch.manager now # CONFIG_PATH = Path('./data/config/watch.json') # Removed # Expected column definitions EXPECTED_WATCHED_PLAYLISTS_COLUMNS = { - 'spotify_id': 'TEXT PRIMARY KEY', - 'name': 'TEXT', - 'owner_id': 'TEXT', - 'owner_name': 'TEXT', - 'total_tracks': 'INTEGER', - 'link': 'TEXT', - 'snapshot_id': 'TEXT', - 'last_checked': 'INTEGER', - 'added_at': 'INTEGER', - 'is_active': 'INTEGER DEFAULT 1' + "spotify_id": "TEXT PRIMARY KEY", + "name": "TEXT", + "owner_id": "TEXT", + "owner_name": "TEXT", + "total_tracks": "INTEGER", + "link": "TEXT", + "snapshot_id": "TEXT", + "last_checked": "INTEGER", + "added_at": "INTEGER", + "is_active": "INTEGER DEFAULT 1", } EXPECTED_PLAYLIST_TRACKS_COLUMNS = { - 'spotify_track_id': 'TEXT PRIMARY KEY', - 'title': 'TEXT', - 'artist_names': 'TEXT', - 'album_name': 'TEXT', - 'album_artist_names': 'TEXT', - 'track_number': 'INTEGER', - 'album_spotify_id': 'TEXT', - 'duration_ms': 'INTEGER', - 'added_at_playlist': 'TEXT', - 'added_to_db': 'INTEGER', - 'is_present_in_spotify': 'INTEGER DEFAULT 1', - 'last_seen_in_spotify': 'INTEGER' + "spotify_track_id": "TEXT PRIMARY KEY", + "title": "TEXT", + "artist_names": "TEXT", + "album_name": "TEXT", + "album_artist_names": "TEXT", + "track_number": "INTEGER", + "album_spotify_id": "TEXT", + "duration_ms": "INTEGER", + "added_at_playlist": "TEXT", + "added_to_db": "INTEGER", + "is_present_in_spotify": "INTEGER DEFAULT 1", + "last_seen_in_spotify": "INTEGER", } EXPECTED_WATCHED_ARTISTS_COLUMNS = { - 'spotify_id': 'TEXT PRIMARY KEY', - 'name': 'TEXT', - 'link': 'TEXT', - 'total_albums_on_spotify': 'INTEGER', # Number of albums found via API - 'last_checked': 'INTEGER', - 'added_at': 'INTEGER', - 'is_active': 'INTEGER DEFAULT 1', - 'genres': 'TEXT', # Comma-separated - 'popularity': 'INTEGER', - 'image_url': 'TEXT' + "spotify_id": "TEXT PRIMARY KEY", + "name": "TEXT", + "link": "TEXT", + "total_albums_on_spotify": "INTEGER", # Number of albums found via API + "last_checked": "INTEGER", + "added_at": "INTEGER", + "is_active": "INTEGER DEFAULT 1", + "genres": "TEXT", # Comma-separated + "popularity": "INTEGER", + "image_url": "TEXT", } EXPECTED_ARTIST_ALBUMS_COLUMNS = { - 'album_spotify_id': 'TEXT PRIMARY KEY', - 'artist_spotify_id': 'TEXT', # Foreign key to watched_artists - 'name': 'TEXT', - 'album_group': 'TEXT', # album, single, compilation, appears_on - 'album_type': 'TEXT', # album, single, compilation - 'release_date': 'TEXT', - 'release_date_precision': 'TEXT', # year, month, day - 'total_tracks': 'INTEGER', - 'link': 'TEXT', - 'image_url': 'TEXT', - 'added_to_db': 'INTEGER', - 'last_seen_on_spotify': 'INTEGER', # Timestamp when 
last confirmed via API - 'download_task_id': 'TEXT', - 'download_status': 'INTEGER DEFAULT 0', # 0: Not Queued, 1: Queued/In Progress, 2: Downloaded, 3: Error - 'is_fully_downloaded_managed_by_app': 'INTEGER DEFAULT 0' # 0: No, 1: Yes (app has marked all its tracks as downloaded) + "album_spotify_id": "TEXT PRIMARY KEY", + "artist_spotify_id": "TEXT", # Foreign key to watched_artists + "name": "TEXT", + "album_group": "TEXT", # album, single, compilation, appears_on + "album_type": "TEXT", # album, single, compilation + "release_date": "TEXT", + "release_date_precision": "TEXT", # year, month, day + "total_tracks": "INTEGER", + "link": "TEXT", + "image_url": "TEXT", + "added_to_db": "INTEGER", + "last_seen_on_spotify": "INTEGER", # Timestamp when last confirmed via API + "download_task_id": "TEXT", + "download_status": "INTEGER DEFAULT 0", # 0: Not Queued, 1: Queued/In Progress, 2: Downloaded, 3: Error + "is_fully_downloaded_managed_by_app": "INTEGER DEFAULT 0", # 0: No, 1: Yes (app has marked all its tracks as downloaded) } -def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_columns: dict, table_description: str): + +def _ensure_table_schema( + cursor: sqlite3.Cursor, + table_name: str, + expected_columns: dict, + table_description: str, +): """ Ensures the given table has all expected columns, adding them if necessary. """ @@ -86,7 +91,9 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum added_columns_to_this_table = False for col_name, col_type in expected_columns.items(): if col_name not in existing_column_names: - if 'PRIMARY KEY' in col_type.upper() and existing_columns_info: # Only warn if table already exists + if ( + "PRIMARY KEY" in col_type.upper() and existing_columns_info + ): # Only warn if table already exists logger.warning( f"Column '{col_name}' is part of PRIMARY KEY for {table_description} '{table_name}' " f"and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN. " @@ -94,10 +101,14 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum ) continue - col_type_for_add = col_type.replace(' PRIMARY KEY', '').strip() + col_type_for_add = col_type.replace(" PRIMARY KEY", "").strip() try: - cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_for_add}") - logger.info(f"Added missing column '{col_name} {col_type_for_add}' to {table_description} table '{table_name}'.") + cursor.execute( + f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_for_add}" + ) + logger.info( + f"Added missing column '{col_name} {col_type_for_add}' to {table_description} table '{table_name}'." 
+ ) added_columns_to_this_table = True except sqlite3.OperationalError as alter_e: logger.warning( @@ -106,21 +117,27 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum ) return added_columns_to_this_table except sqlite3.Error as e: - logger.error(f"Error ensuring schema for {table_description} table '{table_name}': {e}", exc_info=True) + logger.error( + f"Error ensuring schema for {table_description} table '{table_name}': {e}", + exc_info=True, + ) return False + def _get_playlists_db_connection(): DB_DIR.mkdir(parents=True, exist_ok=True) conn = sqlite3.connect(PLAYLISTS_DB_PATH, timeout=10) conn.row_factory = sqlite3.Row return conn + def _get_artists_db_connection(): DB_DIR.mkdir(parents=True, exist_ok=True) conn = sqlite3.connect(ARTISTS_DB_PATH, timeout=10) conn.row_factory = sqlite3.Row return conn + def init_playlists_db(): """Initializes the playlists database and creates/updates the main watched_playlists table.""" try: @@ -141,18 +158,26 @@ def init_playlists_db(): ) """) # Ensure schema - if _ensure_table_schema(cursor, 'watched_playlists', EXPECTED_WATCHED_PLAYLISTS_COLUMNS, "watched playlists"): + if _ensure_table_schema( + cursor, + "watched_playlists", + EXPECTED_WATCHED_PLAYLISTS_COLUMNS, + "watched playlists", + ): conn.commit() - logger.info(f"Playlists database initialized/updated successfully at {PLAYLISTS_DB_PATH}") + logger.info( + f"Playlists database initialized/updated successfully at {PLAYLISTS_DB_PATH}" + ) except sqlite3.Error as e: logger.error(f"Error initializing watched_playlists table: {e}", exc_info=True) raise + def _create_playlist_tracks_table(playlist_spotify_id: str): """Creates or updates a table for a specific playlist to store its tracks in playlists.db.""" - table_name = f"playlist_{playlist_spotify_id.replace('-', '_').replace(' ', '_')}" # Sanitize table name + table_name = f"playlist_{playlist_spotify_id.replace('-', '_').replace(' ', '_')}" # Sanitize table name try: - with _get_playlists_db_connection() as conn: # Use playlists connection + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() cursor.execute(f""" CREATE TABLE IF NOT EXISTS {table_name} ( @@ -171,112 +196,173 @@ def _create_playlist_tracks_table(playlist_spotify_id: str): ) """) # Ensure schema - if _ensure_table_schema(cursor, table_name, EXPECTED_PLAYLIST_TRACKS_COLUMNS, f"playlist tracks ({playlist_spotify_id})"): + if _ensure_table_schema( + cursor, + table_name, + EXPECTED_PLAYLIST_TRACKS_COLUMNS, + f"playlist tracks ({playlist_spotify_id})", + ): conn.commit() - logger.info(f"Tracks table '{table_name}' created/updated or already exists in {PLAYLISTS_DB_PATH}.") + logger.info( + f"Tracks table '{table_name}' created/updated or already exists in {PLAYLISTS_DB_PATH}." 
+ ) except sqlite3.Error as e: - logger.error(f"Error creating playlist tracks table {table_name} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error creating playlist tracks table {table_name} in {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) raise + def add_playlist_to_watch(playlist_data: dict): """Adds a playlist to the watched_playlists table and creates its tracks table in playlists.db.""" try: - _create_playlist_tracks_table(playlist_data['id']) - with _get_playlists_db_connection() as conn: # Use playlists connection + _create_playlist_tracks_table(playlist_data["id"]) + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() - cursor.execute(""" - INSERT OR REPLACE INTO watched_playlists + cursor.execute( + """ + INSERT OR REPLACE INTO watched_playlists (spotify_id, name, owner_id, owner_name, total_tracks, link, snapshot_id, last_checked, added_at, is_active) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 1) - """, ( - playlist_data['id'], - playlist_data['name'], - playlist_data['owner']['id'], - playlist_data['owner'].get('display_name', playlist_data['owner']['id']), - playlist_data['tracks']['total'], - playlist_data['external_urls']['spotify'], - playlist_data.get('snapshot_id'), - int(time.time()), - int(time.time()) - )) + """, + ( + playlist_data["id"], + playlist_data["name"], + playlist_data["owner"]["id"], + playlist_data["owner"].get( + "display_name", playlist_data["owner"]["id"] + ), + playlist_data["tracks"]["total"], + playlist_data["external_urls"]["spotify"], + playlist_data.get("snapshot_id"), + int(time.time()), + int(time.time()), + ), + ) conn.commit() - logger.info(f"Playlist '{playlist_data['name']}' ({playlist_data['id']}) added to watchlist in {PLAYLISTS_DB_PATH}.") + logger.info( + f"Playlist '{playlist_data['name']}' ({playlist_data['id']}) added to watchlist in {PLAYLISTS_DB_PATH}." + ) except sqlite3.Error as e: - logger.error(f"Error adding playlist {playlist_data.get('id')} to watchlist in {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error adding playlist {playlist_data.get('id')} to watchlist in {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) raise + def remove_playlist_from_watch(playlist_spotify_id: str): """Removes a playlist from watched_playlists and drops its tracks table in playlists.db.""" table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}" try: - with _get_playlists_db_connection() as conn: # Use playlists connection + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() - cursor.execute("DELETE FROM watched_playlists WHERE spotify_id = ?", (playlist_spotify_id,)) + cursor.execute( + "DELETE FROM watched_playlists WHERE spotify_id = ?", + (playlist_spotify_id,), + ) cursor.execute(f"DROP TABLE IF EXISTS {table_name}") conn.commit() - logger.info(f"Playlist {playlist_spotify_id} removed from watchlist and its table '{table_name}' dropped in {PLAYLISTS_DB_PATH}.") + logger.info( + f"Playlist {playlist_spotify_id} removed from watchlist and its table '{table_name}' dropped in {PLAYLISTS_DB_PATH}." 
+ ) except sqlite3.Error as e: - logger.error(f"Error removing playlist {playlist_spotify_id} from watchlist in {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error removing playlist {playlist_spotify_id} from watchlist in {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) raise + def get_watched_playlists(): """Retrieves all active playlists from the watched_playlists table in playlists.db.""" try: - with _get_playlists_db_connection() as conn: # Use playlists connection + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() cursor.execute("SELECT * FROM watched_playlists WHERE is_active = 1") playlists = [dict(row) for row in cursor.fetchall()] return playlists except sqlite3.Error as e: - logger.error(f"Error retrieving watched playlists from {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error retrieving watched playlists from {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) return [] + def get_watched_playlist(playlist_spotify_id: str): """Retrieves a specific playlist from the watched_playlists table in playlists.db.""" try: - with _get_playlists_db_connection() as conn: # Use playlists connection + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() - cursor.execute("SELECT * FROM watched_playlists WHERE spotify_id = ?", (playlist_spotify_id,)) + cursor.execute( + "SELECT * FROM watched_playlists WHERE spotify_id = ?", + (playlist_spotify_id,), + ) row = cursor.fetchone() return dict(row) if row else None except sqlite3.Error as e: - logger.error(f"Error retrieving playlist {playlist_spotify_id} from {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error retrieving playlist {playlist_spotify_id} from {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) return None -def update_playlist_snapshot(playlist_spotify_id: str, snapshot_id: str, total_tracks: int): + +def update_playlist_snapshot( + playlist_spotify_id: str, snapshot_id: str, total_tracks: int +): """Updates the snapshot_id and total_tracks for a watched playlist in playlists.db.""" try: - with _get_playlists_db_connection() as conn: # Use playlists connection + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() - cursor.execute(""" - UPDATE watched_playlists + cursor.execute( + """ + UPDATE watched_playlists SET snapshot_id = ?, total_tracks = ?, last_checked = ? WHERE spotify_id = ? 
- """, (snapshot_id, total_tracks, int(time.time()), playlist_spotify_id)) + """, + (snapshot_id, total_tracks, int(time.time()), playlist_spotify_id), + ) conn.commit() except sqlite3.Error as e: - logger.error(f"Error updating snapshot for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error updating snapshot for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) + def get_playlist_track_ids_from_db(playlist_spotify_id: str): """Retrieves all track Spotify IDs from a specific playlist's tracks table in playlists.db.""" table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}" - track_ids = set() + track_ids: set[str] = set() try: - with _get_playlists_db_connection() as conn: # Use playlists connection + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() - cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';") + cursor.execute( + f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';" + ) if cursor.fetchone() is None: - logger.warning(f"Track table {table_name} does not exist in {PLAYLISTS_DB_PATH}. Cannot fetch track IDs.") + logger.warning( + f"Track table {table_name} does not exist in {PLAYLISTS_DB_PATH}. Cannot fetch track IDs." + ) return track_ids - cursor.execute(f"SELECT spotify_track_id FROM {table_name} WHERE is_present_in_spotify = 1") + cursor.execute( + f"SELECT spotify_track_id FROM {table_name} WHERE is_present_in_spotify = 1" + ) rows = cursor.fetchall() for row in rows: - track_ids.add(row['spotify_track_id']) + track_ids.add(row["spotify_track_id"]) return track_ids except sqlite3.Error as e: - logger.error(f"Error retrieving track IDs for playlist {playlist_spotify_id} from table {table_name} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error retrieving track IDs for playlist {playlist_spotify_id} from table {table_name} in {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) return track_ids + def add_tracks_to_playlist_db(playlist_spotify_id: str, tracks_data: list): """ Updates existing tracks in the playlist's DB table to mark them as currently present @@ -290,39 +376,57 @@ def add_tracks_to_playlist_db(playlist_spotify_id: str, tracks_data: list): current_time = int(time.time()) tracks_to_update = [] for track_item in tracks_data: - track = track_item.get('track') - if not track or not track.get('id'): - logger.warning(f"Skipping track update due to missing data or ID in playlist {playlist_spotify_id}: {track_item}") + track = track_item.get("track") + if not track or not track.get("id"): + logger.warning( + f"Skipping track update due to missing data or ID in playlist {playlist_spotify_id}: {track_item}" + ) continue - artist_names = ", ".join([artist['name'] for artist in track.get('artists', []) if artist.get('name')]) - album_artist_names = ", ".join([artist['name'] for artist in track.get('album', {}).get('artists', []) if artist.get('name')]) + artist_names = ", ".join( + [ + artist["name"] + for artist in track.get("artists", []) + if artist.get("name") + ] + ) + album_artist_names = ", ".join( + [ + artist["name"] + for artist in track.get("album", {}).get("artists", []) + if artist.get("name") + ] + ) # Prepare tuple for UPDATE statement. 
# Order: title, artist_names, album_name, album_artist_names, track_number, # album_spotify_id, duration_ms, added_at_playlist, # is_present_in_spotify, last_seen_in_spotify, spotify_track_id (for WHERE) - tracks_to_update.append(( - track.get('name', 'N/A'), - artist_names, - track.get('album', {}).get('name', 'N/A'), - album_artist_names, - track.get('track_number'), - track.get('album', {}).get('id'), - track.get('duration_ms'), - track_item.get('added_at'), # From playlist item, update if changed - 1, # is_present_in_spotify flag - current_time, # last_seen_in_spotify timestamp - # added_to_db is NOT updated here as this function only updates existing records. - track['id'] # spotify_track_id for the WHERE clause - )) + tracks_to_update.append( + ( + track.get("name", "N/A"), + artist_names, + track.get("album", {}).get("name", "N/A"), + album_artist_names, + track.get("track_number"), + track.get("album", {}).get("id"), + track.get("duration_ms"), + track_item.get("added_at"), # From playlist item, update if changed + 1, # is_present_in_spotify flag + current_time, # last_seen_in_spotify timestamp + # added_to_db is NOT updated here as this function only updates existing records. + track["id"], # spotify_track_id for the WHERE clause + ) + ) if not tracks_to_update: - logger.info(f"No valid tracks to prepare for update for playlist {playlist_spotify_id}.") + logger.info( + f"No valid tracks to prepare for update for playlist {playlist_spotify_id}." + ) return try: - with _get_playlists_db_connection() as conn: # Use playlists connection + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() # The table should have been created when the playlist was added to watch # or when the first track was successfully downloaded. @@ -330,7 +434,8 @@ def add_tracks_to_playlist_db(playlist_spotify_id: str, tracks_data: list): # The fields in SET must match the order of ?s, excluding the last one for WHERE. # This will only update rows where spotify_track_id matches. - cursor.executemany(f""" + cursor.executemany( + f""" UPDATE {table_name} SET title = ?, artist_names = ?, @@ -343,30 +448,48 @@ def add_tracks_to_playlist_db(playlist_spotify_id: str, tracks_data: list): is_present_in_spotify = ?, last_seen_in_spotify = ? WHERE spotify_track_id = ? - """, tracks_to_update) + """, + tracks_to_update, + ) conn.commit() - logger.info(f"Attempted to update metadata for {len(tracks_to_update)} tracks from API in DB for playlist {playlist_spotify_id}. Actual rows updated: {cursor.rowcount if cursor.rowcount != -1 else 'unknown'}.") + logger.info( + f"Attempted to update metadata for {len(tracks_to_update)} tracks from API in DB for playlist {playlist_spotify_id}. Actual rows updated: {cursor.rowcount if cursor.rowcount != -1 else 'unknown'}." + ) except sqlite3.Error as e: - logger.error(f"Error updating tracks in playlist {playlist_spotify_id} in table {table_name} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error updating tracks in playlist {playlist_spotify_id} in table {table_name} in {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) # Not raising here to allow other operations to continue if one batch fails. 
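A note on the executemany idiom the hunk above preserves: each tuple in tracks_to_update supplies the SET parameters first and the WHERE parameter last, in exactly the order the ?s appear, and rows whose spotify_track_id has no match are silently skipped (which is why the log line reports cursor.rowcount separately from len(tracks_to_update)). A minimal standalone sketch of that behaviour, using an illustrative two-column table rather than the app's real schema:

    import sqlite3
    import time

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE tracks (spotify_track_id TEXT PRIMARY KEY, title TEXT, last_seen INTEGER)"
    )
    conn.execute("INSERT INTO tracks VALUES ('id1', 'Old Title', 0)")

    now = int(time.time())
    # SET parameters first, WHERE parameter last, matching the ? order.
    rows = [("New Title", now, "id1"), ("Never Inserted", now, "id2")]
    cur = conn.executemany(
        "UPDATE tracks SET title = ?, last_seen = ? WHERE spotify_track_id = ?",
        rows,
    )
    conn.commit()
    print(cur.rowcount)  # 1 -- 'id2' matched nothing, so only one row changed
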
-def mark_tracks_as_not_present_in_spotify(playlist_spotify_id: str, track_ids_to_mark: list): + +def mark_tracks_as_not_present_in_spotify( + playlist_spotify_id: str, track_ids_to_mark: list +): """Marks specified tracks as not present in the Spotify playlist anymore in playlists.db.""" table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}" if not track_ids_to_mark: return try: - with _get_playlists_db_connection() as conn: # Use playlists connection + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() - placeholders = ','.join('?' for _ in track_ids_to_mark) + placeholders = ",".join("?" for _ in track_ids_to_mark) sql = f"UPDATE {table_name} SET is_present_in_spotify = 0 WHERE spotify_track_id IN ({placeholders})" cursor.execute(sql, track_ids_to_mark) conn.commit() - logger.info(f"Marked {cursor.rowcount} tracks as not present in Spotify for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}.") + logger.info( + f"Marked {cursor.rowcount} tracks as not present in Spotify for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}." + ) except sqlite3.Error as e: - logger.error(f"Error marking tracks as not present for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error marking tracks as not present for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) -def add_specific_tracks_to_playlist_table(playlist_spotify_id: str, track_details_list: list): + +def add_specific_tracks_to_playlist_table( + playlist_spotify_id: str, track_details_list: list +): """ Adds specific tracks (with full details fetched separately) to the playlist's table. This is used when a user manually marks tracks as "downloaded" or "known". @@ -378,48 +501,79 @@ def add_specific_tracks_to_playlist_table(playlist_spotify_id: str, track_detail current_time = int(time.time()) tracks_to_insert = [] - for track in track_details_list: # track here is assumed to be a full Spotify TrackObject - if not track or not track.get('id'): - logger.warning(f"Skipping track due to missing data or ID (manual add) in playlist {playlist_spotify_id}: {track}") + for ( + track + ) in track_details_list: # track here is assumed to be a full Spotify TrackObject + if not track or not track.get("id"): + logger.warning( + f"Skipping track due to missing data or ID (manual add) in playlist {playlist_spotify_id}: {track}" + ) continue - artist_names = ", ".join([artist['name'] for artist in track.get('artists', []) if artist.get('name')]) - album_artist_names = ", ".join([artist['name'] for artist in track.get('album', {}).get('artists', []) if artist.get('name')]) + artist_names = ", ".join( + [ + artist["name"] + for artist in track.get("artists", []) + if artist.get("name") + ] + ) + album_artist_names = ", ".join( + [ + artist["name"] + for artist in track.get("album", {}).get("artists", []) + if artist.get("name") + ] + ) - tracks_to_insert.append(( - track['id'], - track.get('name', 'N/A'), - artist_names, - track.get('album', {}).get('name', 'N/A'), - album_artist_names, - track.get('track_number'), - track.get('album', {}).get('id'), - track.get('duration_ms'), - None, # added_at_playlist - not known for manually added tracks this way - current_time, # added_to_db - 1, # is_present_in_spotify (assume user wants it considered present) - current_time # last_seen_in_spotify - )) + tracks_to_insert.append( + ( + track["id"], + track.get("name", "N/A"), + artist_names, + track.get("album", {}).get("name", 
"N/A"), + album_artist_names, + track.get("track_number"), + track.get("album", {}).get("id"), + track.get("duration_ms"), + None, # added_at_playlist - not known for manually added tracks this way + current_time, # added_to_db + 1, # is_present_in_spotify (assume user wants it considered present) + current_time, # last_seen_in_spotify + ) + ) if not tracks_to_insert: - logger.info(f"No valid tracks to insert (manual add) for playlist {playlist_spotify_id}.") + logger.info( + f"No valid tracks to insert (manual add) for playlist {playlist_spotify_id}." + ) return try: - with _get_playlists_db_connection() as conn: # Use playlists connection + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() - _create_playlist_tracks_table(playlist_spotify_id) # Ensure table exists - cursor.executemany(f""" + _create_playlist_tracks_table(playlist_spotify_id) # Ensure table exists + cursor.executemany( + f""" INSERT OR REPLACE INTO {table_name} (spotify_track_id, title, artist_names, album_name, album_artist_names, track_number, album_spotify_id, duration_ms, added_at_playlist, added_to_db, is_present_in_spotify, last_seen_in_spotify) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """, tracks_to_insert) + """, + tracks_to_insert, + ) conn.commit() - logger.info(f"Manually added/updated {len(tracks_to_insert)} tracks in DB for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}.") + logger.info( + f"Manually added/updated {len(tracks_to_insert)} tracks in DB for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}." + ) except sqlite3.Error as e: - logger.error(f"Error manually adding tracks to playlist {playlist_spotify_id} in table {table_name} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error manually adding tracks to playlist {playlist_spotify_id} in table {table_name} in {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) -def remove_specific_tracks_from_playlist_table(playlist_spotify_id: str, track_spotify_ids: list): + +def remove_specific_tracks_from_playlist_table( + playlist_spotify_id: str, track_spotify_ids: list +): """Removes specific tracks from the playlist's local track table.""" table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}" if not track_spotify_ids: @@ -428,64 +582,97 @@ def remove_specific_tracks_from_playlist_table(playlist_spotify_id: str, track_s try: with _get_playlists_db_connection() as conn: cursor = conn.cursor() - placeholders = ','.join('?' for _ in track_spotify_ids) + placeholders = ",".join("?" for _ in track_spotify_ids) # Check if table exists first - cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';") + cursor.execute( + f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';" + ) if cursor.fetchone() is None: - logger.warning(f"Track table {table_name} does not exist. Cannot remove tracks.") + logger.warning( + f"Track table {table_name} does not exist. Cannot remove tracks." + ) return 0 - cursor.execute(f"DELETE FROM {table_name} WHERE spotify_track_id IN ({placeholders})", track_spotify_ids) + cursor.execute( + f"DELETE FROM {table_name} WHERE spotify_track_id IN ({placeholders})", + track_spotify_ids, + ) conn.commit() deleted_count = cursor.rowcount - logger.info(f"Manually removed {deleted_count} tracks from DB for playlist {playlist_spotify_id}.") + logger.info( + f"Manually removed {deleted_count} tracks from DB for playlist {playlist_spotify_id}." 
+ ) return deleted_count except sqlite3.Error as e: - logger.error(f"Error manually removing tracks for playlist {playlist_spotify_id} from table {table_name}: {e}", exc_info=True) + logger.error( + f"Error manually removing tracks for playlist {playlist_spotify_id} from table {table_name}: {e}", + exc_info=True, + ) return 0 + def add_single_track_to_playlist_db(playlist_spotify_id: str, track_item_for_db: dict): """Adds or updates a single track in the specified playlist's tracks table in playlists.db.""" table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}" - track_detail = track_item_for_db.get('track') - if not track_detail or not track_detail.get('id'): - logger.warning(f"Skipping single track due to missing data for playlist {playlist_spotify_id}: {track_item_for_db}") + track_detail = track_item_for_db.get("track") + if not track_detail or not track_detail.get("id"): + logger.warning( + f"Skipping single track due to missing data for playlist {playlist_spotify_id}: {track_item_for_db}" + ) return current_time = int(time.time()) - artist_names = ", ".join([a['name'] for a in track_detail.get('artists', []) if a.get('name')]) - album_artist_names = ", ".join([a['name'] for a in track_detail.get('album', {}).get('artists', []) if a.get('name')]) - + artist_names = ", ".join( + [a["name"] for a in track_detail.get("artists", []) if a.get("name")] + ) + album_artist_names = ", ".join( + [ + a["name"] + for a in track_detail.get("album", {}).get("artists", []) + if a.get("name") + ] + ) + track_data_tuple = ( - track_detail['id'], - track_detail.get('name', 'N/A'), + track_detail["id"], + track_detail.get("name", "N/A"), artist_names, - track_detail.get('album', {}).get('name', 'N/A'), + track_detail.get("album", {}).get("name", "N/A"), album_artist_names, - track_detail.get('track_number'), - track_detail.get('album', {}).get('id'), - track_detail.get('duration_ms'), - track_item_for_db.get('added_at'), - current_time, - 1, - current_time + track_detail.get("track_number"), + track_detail.get("album", {}).get("id"), + track_detail.get("duration_ms"), + track_item_for_db.get("added_at"), + current_time, + 1, + current_time, ) try: - with _get_playlists_db_connection() as conn: # Use playlists connection + with _get_playlists_db_connection() as conn: # Use playlists connection cursor = conn.cursor() - _create_playlist_tracks_table(playlist_spotify_id) - cursor.execute(f""" + _create_playlist_tracks_table(playlist_spotify_id) + cursor.execute( + f""" INSERT OR REPLACE INTO {table_name} (spotify_track_id, title, artist_names, album_name, album_artist_names, track_number, album_spotify_id, duration_ms, added_at_playlist, added_to_db, is_present_in_spotify, last_seen_in_spotify) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """, track_data_tuple) + """, + track_data_tuple, + ) conn.commit() - logger.info(f"Track '{track_detail.get('name')}' added/updated in DB for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}.") + logger.info( + f"Track '{track_detail.get('name')}' added/updated in DB for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}." 
+ ) except sqlite3.Error as e: - logger.error(f"Error adding single track to playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error adding single track to playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}: {e}", + exc_info=True, + ) + # --- Artist Watch Database Functions --- + def init_artists_db(): """Initializes the artists database and creates/updates the main watched_artists table.""" try: @@ -508,18 +695,29 @@ def init_artists_db(): ) """) # Ensure schema - if _ensure_table_schema(cursor, 'watched_artists', EXPECTED_WATCHED_ARTISTS_COLUMNS, "watched artists"): + if _ensure_table_schema( + cursor, + "watched_artists", + EXPECTED_WATCHED_ARTISTS_COLUMNS, + "watched artists", + ): conn.commit() - logger.info(f"Artists database initialized/updated successfully at {ARTISTS_DB_PATH}") + logger.info( + f"Artists database initialized/updated successfully at {ARTISTS_DB_PATH}" + ) except sqlite3.Error as e: - logger.error(f"Error initializing watched_artists table in {ARTISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error initializing watched_artists table in {ARTISTS_DB_PATH}: {e}", + exc_info=True, + ) raise + def _create_artist_albums_table(artist_spotify_id: str): """Creates or updates a table for a specific artist to store their albums in artists.db.""" - table_name = f"artist_{artist_spotify_id.replace('-', '_').replace(' ', '_')}" # Sanitize table name + table_name = f"artist_{artist_spotify_id.replace('-', '_').replace(' ', '_')}" # Sanitize table name try: - with _get_artists_db_connection() as conn: # Use artists connection + with _get_artists_db_connection() as conn: # Use artists connection cursor = conn.cursor() # Note: Several columns including artist_spotify_id, release_date_precision, image_url, # last_seen_on_spotify, download_task_id, download_status, is_fully_downloaded_managed_by_app @@ -527,33 +725,44 @@ def _create_artist_albums_table(artist_spotify_id: str): cursor.execute(f""" CREATE TABLE IF NOT EXISTS {table_name} ( album_spotify_id TEXT PRIMARY KEY, - artist_spotify_id TEXT, + artist_spotify_id TEXT, name TEXT, - album_group TEXT, - album_type TEXT, + album_group TEXT, + album_type TEXT, release_date TEXT, release_date_precision TEXT, total_tracks INTEGER, link TEXT, image_url TEXT, - added_to_db INTEGER, + added_to_db INTEGER, last_seen_on_spotify INTEGER, download_task_id TEXT, download_status INTEGER DEFAULT 0, - is_fully_downloaded_managed_by_app INTEGER DEFAULT 0 + is_fully_downloaded_managed_by_app INTEGER DEFAULT 0 ) """) # Ensure schema for the specific artist's album table - if _ensure_table_schema(cursor, table_name, EXPECTED_ARTIST_ALBUMS_COLUMNS, f"artist albums ({artist_spotify_id})"): + if _ensure_table_schema( + cursor, + table_name, + EXPECTED_ARTIST_ALBUMS_COLUMNS, + f"artist albums ({artist_spotify_id})", + ): conn.commit() - logger.info(f"Albums table '{table_name}' created/updated or already exists in {ARTISTS_DB_PATH}.") + logger.info( + f"Albums table '{table_name}' created/updated or already exists in {ARTISTS_DB_PATH}." 
+ ) except sqlite3.Error as e: - logger.error(f"Error creating artist albums table {table_name} in {ARTISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error creating artist albums table {table_name} in {ARTISTS_DB_PATH}: {e}", + exc_info=True, + ) raise + def add_artist_to_watch(artist_data: dict): """Adds an artist to the watched_artists table and creates its albums table in artists.db.""" - artist_id = artist_data.get('id') + artist_id = artist_data.get("id") if not artist_id: logger.error("Cannot add artist to watch: Missing 'id' in artist_data.") return @@ -562,40 +771,60 @@ def add_artist_to_watch(artist_data: dict): _create_artist_albums_table(artist_id) with _get_artists_db_connection() as conn: cursor = conn.cursor() - cursor.execute(""" - INSERT OR REPLACE INTO watched_artists + cursor.execute( + """ + INSERT OR REPLACE INTO watched_artists (spotify_id, name, total_albums_on_spotify, last_checked, added_at, is_active) VALUES (?, ?, ?, ?, ?, 1) - """, ( - artist_id, - artist_data.get('name', 'N/A'), - artist_data.get('albums', {}).get('total', 0), - int(time.time()), - int(time.time()) - )) + """, + ( + artist_id, + artist_data.get("name", "N/A"), + artist_data.get("albums", {}).get("total", 0), + int(time.time()), + int(time.time()), + ), + ) conn.commit() - logger.info(f"Artist '{artist_data.get('name')}' ({artist_id}) added to watchlist in {ARTISTS_DB_PATH}.") + logger.info( + f"Artist '{artist_data.get('name')}' ({artist_id}) added to watchlist in {ARTISTS_DB_PATH}." + ) except sqlite3.Error as e: - logger.error(f"Error adding artist {artist_id} to watchlist in {ARTISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error adding artist {artist_id} to watchlist in {ARTISTS_DB_PATH}: {e}", + exc_info=True, + ) raise except KeyError as e: - logger.error(f"Missing key in artist_data for artist {artist_id}: {e}. Data: {artist_data}", exc_info=True) + logger.error( + f"Missing key in artist_data for artist {artist_id}: {e}. Data: {artist_data}", + exc_info=True, + ) raise + def remove_artist_from_watch(artist_spotify_id: str): """Removes an artist from watched_artists and drops its albums table in artists.db.""" table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums" try: with _get_artists_db_connection() as conn: cursor = conn.cursor() - cursor.execute("DELETE FROM watched_artists WHERE spotify_id = ?", (artist_spotify_id,)) + cursor.execute( + "DELETE FROM watched_artists WHERE spotify_id = ?", (artist_spotify_id,) + ) cursor.execute(f"DROP TABLE IF EXISTS {table_name}") conn.commit() - logger.info(f"Artist {artist_spotify_id} removed from watchlist and its table '{table_name}' dropped from {ARTISTS_DB_PATH}.") + logger.info( + f"Artist {artist_spotify_id} removed from watchlist and its table '{table_name}' dropped from {ARTISTS_DB_PATH}." 
+ ) except sqlite3.Error as e: - logger.error(f"Error removing artist {artist_spotify_id} from watchlist in {ARTISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error removing artist {artist_spotify_id} from watchlist in {ARTISTS_DB_PATH}: {e}", + exc_info=True, + ) raise + def get_watched_artists(): """Retrieves all active artists from the watched_artists table in artists.db.""" try: @@ -605,153 +834,224 @@ def get_watched_artists(): artists = [dict(row) for row in cursor.fetchall()] return artists except sqlite3.Error as e: - logger.error(f"Error retrieving watched artists from {ARTISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error retrieving watched artists from {ARTISTS_DB_PATH}: {e}", + exc_info=True, + ) return [] + def get_watched_artist(artist_spotify_id: str): """Retrieves a specific artist from the watched_artists table in artists.db.""" try: with _get_artists_db_connection() as conn: cursor = conn.cursor() - cursor.execute("SELECT * FROM watched_artists WHERE spotify_id = ?", (artist_spotify_id,)) + cursor.execute( + "SELECT * FROM watched_artists WHERE spotify_id = ?", + (artist_spotify_id,), + ) row = cursor.fetchone() return dict(row) if row else None except sqlite3.Error as e: - logger.error(f"Error retrieving artist {artist_spotify_id} from {ARTISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error retrieving artist {artist_spotify_id} from {ARTISTS_DB_PATH}: {e}", + exc_info=True, + ) return None -def update_artist_metadata_after_check(artist_spotify_id: str, total_albums_from_api: int): + +def update_artist_metadata_after_check( + artist_spotify_id: str, total_albums_from_api: int +): """Updates the total_albums_on_spotify and last_checked for an artist in artists.db.""" try: with _get_artists_db_connection() as conn: cursor = conn.cursor() - cursor.execute(""" - UPDATE watched_artists + cursor.execute( + """ + UPDATE watched_artists SET total_albums_on_spotify = ?, last_checked = ? WHERE spotify_id = ? - """, (total_albums_from_api, int(time.time()), artist_spotify_id)) + """, + (total_albums_from_api, int(time.time()), artist_spotify_id), + ) conn.commit() except sqlite3.Error as e: - logger.error(f"Error updating metadata for artist {artist_spotify_id} in {ARTISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error updating metadata for artist {artist_spotify_id} in {ARTISTS_DB_PATH}: {e}", + exc_info=True, + ) + def get_artist_album_ids_from_db(artist_spotify_id: str): """Retrieves all album Spotify IDs from a specific artist's albums table in artists.db.""" table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums" - album_ids = set() + album_ids: set[str] = set() try: with _get_artists_db_connection() as conn: cursor = conn.cursor() - cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';") + cursor.execute( + f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';" + ) if cursor.fetchone() is None: - logger.warning(f"Album table {table_name} for artist {artist_spotify_id} does not exist in {ARTISTS_DB_PATH}. Cannot fetch album IDs.") + logger.warning( + f"Album table {table_name} for artist {artist_spotify_id} does not exist in {ARTISTS_DB_PATH}. Cannot fetch album IDs." 
+ ) return album_ids cursor.execute(f"SELECT album_spotify_id FROM {table_name}") rows = cursor.fetchall() for row in rows: - album_ids.add(row['album_spotify_id']) + album_ids.add(row["album_spotify_id"]) return album_ids except sqlite3.Error as e: - logger.error(f"Error retrieving album IDs for artist {artist_spotify_id} from {ARTISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error retrieving album IDs for artist {artist_spotify_id} from {ARTISTS_DB_PATH}: {e}", + exc_info=True, + ) return album_ids -def add_or_update_album_for_artist(artist_spotify_id: str, album_data: dict, task_id: str = None, is_download_complete: bool = False): + +def add_or_update_album_for_artist( + artist_spotify_id: str, + album_data: dict, + task_id: str = None, + is_download_complete: bool = False, +): """Adds or updates an album in the specified artist's albums table in artists.db.""" table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums" - album_id = album_data.get('id') + album_id = album_data.get("id") if not album_id: - logger.warning(f"Skipping album for artist {artist_spotify_id} due to missing album ID: {album_data}") + logger.warning( + f"Skipping album for artist {artist_spotify_id} due to missing album ID: {album_data}" + ) return - download_status = 0 + download_status = 0 if task_id and not is_download_complete: - download_status = 1 - elif is_download_complete: - download_status = 2 + download_status = 1 + elif is_download_complete: + download_status = 2 current_time = int(time.time()) album_tuple = ( album_id, - album_data.get('name', 'N/A'), - album_data.get('album_group', 'N/A'), - album_data.get('album_type', 'N/A'), - album_data.get('release_date'), - album_data.get('total_tracks'), - current_time, + album_data.get("name", "N/A"), + album_data.get("album_group", "N/A"), + album_data.get("album_type", "N/A"), + album_data.get("release_date"), + album_data.get("total_tracks"), + current_time, download_status, - task_id + task_id, ) try: with _get_artists_db_connection() as conn: cursor = conn.cursor() - _create_artist_albums_table(artist_spotify_id) - - cursor.execute(f"SELECT added_to_db_at FROM {table_name} WHERE album_spotify_id = ?", (album_id,)) + _create_artist_albums_table(artist_spotify_id) + + cursor.execute( + f"SELECT added_to_db_at FROM {table_name} WHERE album_spotify_id = ?", + (album_id,), + ) existing_row = cursor.fetchone() - - if existing_row: + + if existing_row: update_tuple = ( - album_data.get('name', 'N/A'), - album_data.get('album_group', 'N/A'), - album_data.get('album_type', 'N/A'), - album_data.get('release_date'), - album_data.get('total_tracks'), - download_status, - task_id, - album_id + album_data.get("name", "N/A"), + album_data.get("album_group", "N/A"), + album_data.get("album_type", "N/A"), + album_data.get("release_date"), + album_data.get("total_tracks"), + download_status, + task_id, + album_id, ) - cursor.execute(f""" + cursor.execute( + f""" UPDATE {table_name} SET name = ?, album_group = ?, album_type = ?, release_date = ?, total_tracks = ?, is_download_initiated = ?, task_id = ? WHERE album_spotify_id = ? - """, update_tuple) - logger.info(f"Updated album '{album_data.get('name')}' in DB for artist {artist_spotify_id} in {ARTISTS_DB_PATH}.") - else: - cursor.execute(f""" + """, + update_tuple, + ) + logger.info( + f"Updated album '{album_data.get('name')}' in DB for artist {artist_spotify_id} in {ARTISTS_DB_PATH}." 
+ ) + else: + cursor.execute( + f""" INSERT INTO {table_name} (album_spotify_id, name, album_group, album_type, release_date, total_tracks, added_to_db_at, is_download_initiated, task_id) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) - """, album_tuple) - logger.info(f"Added album '{album_data.get('name')}' to DB for artist {artist_spotify_id} in {ARTISTS_DB_PATH}.") + """, + album_tuple, + ) + logger.info( + f"Added album '{album_data.get('name')}' to DB for artist {artist_spotify_id} in {ARTISTS_DB_PATH}." + ) conn.commit() except sqlite3.Error as e: - logger.error(f"Error adding/updating album {album_id} for artist {artist_spotify_id} in {ARTISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error adding/updating album {album_id} for artist {artist_spotify_id} in {ARTISTS_DB_PATH}: {e}", + exc_info=True, + ) -def update_album_download_status_for_artist(artist_spotify_id: str, album_spotify_id: str, task_id: str, status: int): + +def update_album_download_status_for_artist( + artist_spotify_id: str, album_spotify_id: str, task_id: str, status: int +): """Updates the download status (is_download_initiated) and task_id for a specific album of an artist in artists.db.""" table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums" try: - with _get_artists_db_connection() as conn: + with _get_artists_db_connection() as conn: cursor = conn.cursor() - cursor.execute(f""" + cursor.execute( + f""" UPDATE {table_name} SET is_download_initiated = ?, task_id = ? WHERE album_spotify_id = ? - """, (status, task_id, album_spotify_id)) + """, + (status, task_id, album_spotify_id), + ) if cursor.rowcount == 0: - logger.warning(f"Attempted to update download status for non-existent album {album_spotify_id} for artist {artist_spotify_id} in {ARTISTS_DB_PATH}.") + logger.warning( + f"Attempted to update download status for non-existent album {album_spotify_id} for artist {artist_spotify_id} in {ARTISTS_DB_PATH}." + ) else: - logger.info(f"Updated download status to {status} for album {album_spotify_id} (task: {task_id}) for artist {artist_spotify_id} in {ARTISTS_DB_PATH}.") + logger.info( + f"Updated download status to {status} for album {album_spotify_id} (task: {task_id}) for artist {artist_spotify_id} in {ARTISTS_DB_PATH}." + ) conn.commit() except sqlite3.Error as e: - logger.error(f"Error updating album download status for album {album_spotify_id}, artist {artist_spotify_id} in {ARTISTS_DB_PATH}: {e}", exc_info=True) + logger.error( + f"Error updating album download status for album {album_spotify_id}, artist {artist_spotify_id} in {ARTISTS_DB_PATH}: {e}", + exc_info=True, + ) -def add_specific_albums_to_artist_table(artist_spotify_id: str, album_details_list: list): + +def add_specific_albums_to_artist_table( + artist_spotify_id: str, album_details_list: list +): """ Adds specific albums (with full details fetched separately) to the artist's album table. This can be used when a user manually marks albums as "known" or "processed". Albums added this way are marked with is_download_initiated = 3 (Manually Added/Known). """ if not album_details_list: - logger.info(f"No album details provided to add specifically for artist {artist_spotify_id}.") + logger.info( + f"No album details provided to add specifically for artist {artist_spotify_id}." 
+ ) return 0 processed_count = 0 for album_data in album_details_list: - if not album_data or not album_data.get('id'): - logger.warning(f"Skipping album due to missing data or ID (manual add) for artist {artist_spotify_id}: {album_data}") + if not album_data or not album_data.get("id"): + logger.warning( + f"Skipping album due to missing data or ID (manual add) for artist {artist_spotify_id}: {album_data}" + ) continue - + # Use existing function to add/update, ensuring it handles manual state # Set task_id to None and is_download_initiated to a specific state for manually added known albums # The add_or_update_album_for_artist expects `is_download_complete` not `is_download_initiated` directly. @@ -763,22 +1063,32 @@ def add_specific_albums_to_artist_table(artist_spotify_id: str, album_details_li # We might need a new status like 3 for "Manually Marked as Known" # For simplicity, we'll use `add_or_update_album_for_artist` and the status will be 'download_complete'. # If a more distinct status is needed, `add_or_update_album_for_artist` would need adjustment. - + # Simplification: we'll call add_or_update_album_for_artist which will mark it based on task_id presence or completion. # For a truly "manual" state distinct from "downloaded", `add_or_update_album_for_artist` would need a new status value. # Let's assume for now that adding it via this function means it's "known" and doesn't need downloading. # The `add_or_update_album_for_artist` function sets is_download_initiated based on task_id and is_download_complete. # If task_id is None and is_download_complete is True, it implies it's processed. try: - add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=None, is_download_complete=True) + add_or_update_album_for_artist( + artist_spotify_id, album_data, task_id=None, is_download_complete=True + ) processed_count += 1 except Exception as e: - logger.error(f"Error manually adding album {album_data.get('id')} for artist {artist_spotify_id}: {e}", exc_info=True) - - logger.info(f"Manually added/updated {processed_count} albums in DB for artist {artist_spotify_id} in {ARTISTS_DB_PATH}.") + logger.error( + f"Error manually adding album {album_data.get('id')} for artist {artist_spotify_id}: {e}", + exc_info=True, + ) + + logger.info( + f"Manually added/updated {processed_count} albums in DB for artist {artist_spotify_id} in {ARTISTS_DB_PATH}." + ) return processed_count -def remove_specific_albums_from_artist_table(artist_spotify_id: str, album_spotify_ids: list): + +def remove_specific_albums_from_artist_table( + artist_spotify_id: str, album_spotify_ids: list +): """Removes specific albums from the artist's local album table.""" table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums" if not album_spotify_ids: @@ -787,22 +1097,35 @@ def remove_specific_albums_from_artist_table(artist_spotify_id: str, album_spoti try: with _get_artists_db_connection() as conn: cursor = conn.cursor() - placeholders = ','.join('?' for _ in album_spotify_ids) + placeholders = ",".join("?" for _ in album_spotify_ids) # Check if table exists first - cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';") + cursor.execute( + f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';" + ) if cursor.fetchone() is None: - logger.warning(f"Album table {table_name} for artist {artist_spotify_id} does not exist. Cannot remove albums.") + logger.warning( + f"Album table {table_name} for artist {artist_spotify_id} does not exist. 
Cannot remove albums." + ) return 0 - cursor.execute(f"DELETE FROM {table_name} WHERE album_spotify_id IN ({placeholders})", album_spotify_ids) + cursor.execute( + f"DELETE FROM {table_name} WHERE album_spotify_id IN ({placeholders})", + album_spotify_ids, + ) conn.commit() deleted_count = cursor.rowcount - logger.info(f"Manually removed {deleted_count} albums from DB for artist {artist_spotify_id}.") + logger.info( + f"Manually removed {deleted_count} albums from DB for artist {artist_spotify_id}." + ) return deleted_count except sqlite3.Error as e: - logger.error(f"Error manually removing albums for artist {artist_spotify_id} from table {table_name}: {e}", exc_info=True) + logger.error( + f"Error manually removing albums for artist {artist_spotify_id} from table {table_name}: {e}", + exc_info=True, + ) return 0 + def is_track_in_playlist_db(playlist_spotify_id: str, track_spotify_id: str) -> bool: """Checks if a specific track Spotify ID exists in the given playlist's tracks table.""" table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}" @@ -810,15 +1133,24 @@ def is_track_in_playlist_db(playlist_spotify_id: str, track_spotify_id: str) -> with _get_playlists_db_connection() as conn: cursor = conn.cursor() # First, check if the table exists to prevent errors on non-watched or new playlists - cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';") + cursor.execute( + f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';" + ) if cursor.fetchone() is None: - return False # Table doesn't exist, so track cannot be in it - - cursor.execute(f"SELECT 1 FROM {table_name} WHERE spotify_track_id = ?", (track_spotify_id,)) + return False # Table doesn't exist, so track cannot be in it + + cursor.execute( + f"SELECT 1 FROM {table_name} WHERE spotify_track_id = ?", + (track_spotify_id,), + ) return cursor.fetchone() is not None except sqlite3.Error as e: - logger.error(f"Error checking if track {track_spotify_id} is in playlist {playlist_spotify_id} DB: {e}", exc_info=True) - return False # Assume not present on error + logger.error( + f"Error checking if track {track_spotify_id} is in playlist {playlist_spotify_id} DB: {e}", + exc_info=True, + ) + return False # Assume not present on error + def is_album_in_artist_db(artist_spotify_id: str, album_spotify_id: str) -> bool: """Checks if a specific album Spotify ID exists in the given artist's albums table.""" @@ -827,12 +1159,20 @@ def is_album_in_artist_db(artist_spotify_id: str, album_spotify_id: str) -> bool with _get_artists_db_connection() as conn: cursor = conn.cursor() # First, check if the table exists - cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';") + cursor.execute( + f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';" + ) if cursor.fetchone() is None: - return False # Table doesn't exist + return False # Table doesn't exist - cursor.execute(f"SELECT 1 FROM {table_name} WHERE album_spotify_id = ?", (album_spotify_id,)) + cursor.execute( + f"SELECT 1 FROM {table_name} WHERE album_spotify_id = ?", + (album_spotify_id,), + ) return cursor.fetchone() is not None except sqlite3.Error as e: - logger.error(f"Error checking if album {album_spotify_id} is in artist {artist_spotify_id} DB: {e}", exc_info=True) - return False # Assume not present on error \ No newline at end of file + logger.error( + f"Error checking if album {album_spotify_id} is in artist {artist_spotify_id} DB: {e}", + exc_info=True, + ) + 
return False # Assume not present on error diff --git a/routes/utils/watch/manager.py b/routes/utils/watch/manager.py index 4aba65c..ed93ff9 100644 --- a/routes/utils/watch/manager.py +++ b/routes/utils/watch/manager.py @@ -3,6 +3,7 @@ import threading import logging import json from pathlib import Path +from typing import Any, List, Dict from routes.utils.watch.db import ( get_watched_playlists, @@ -12,29 +13,30 @@ from routes.utils.watch.db import ( update_playlist_snapshot, mark_tracks_as_not_present_in_spotify, # Artist watch DB functions - init_artists_db, get_watched_artists, get_watched_artist, get_artist_album_ids_from_db, - add_or_update_album_for_artist, # Renamed from add_album_to_artist_db - update_artist_metadata_after_check # Renamed from update_artist_metadata + update_artist_metadata_after_check, # Renamed from update_artist_metadata ) -from routes.utils.get_info import get_spotify_info # To fetch playlist, track, artist, and album details +from routes.utils.get_info import ( + get_spotify_info, +) # To fetch playlist, track, artist, and album details from routes.utils.celery_queue_manager import download_queue_manager logger = logging.getLogger(__name__) -CONFIG_FILE_PATH = Path('./data/config/watch.json') +CONFIG_FILE_PATH = Path("./data/config/watch.json") STOP_EVENT = threading.Event() DEFAULT_WATCH_CONFIG = { "enabled": False, "watchPollIntervalSeconds": 3600, - "max_tracks_per_run": 50, # For playlists - "watchedArtistAlbumGroup": ["album", "single"], # Default for artists + "max_tracks_per_run": 50, # For playlists + "watchedArtistAlbumGroup": ["album", "single"], # Default for artists "delay_between_playlists_seconds": 2, - "delay_between_artists_seconds": 5 # Added for artists + "delay_between_artists_seconds": 5, # Added for artists } + def get_watch_config(): """Loads the watch configuration from watch.json. Creates the file with defaults if it doesn't exist. @@ -45,43 +47,56 @@ def get_watch_config(): CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True) if not CONFIG_FILE_PATH.exists(): - logger.info(f"{CONFIG_FILE_PATH} not found. Creating with default watch config.") - with open(CONFIG_FILE_PATH, 'w') as f: + logger.info( + f"{CONFIG_FILE_PATH} not found. Creating with default watch config." + ) + with open(CONFIG_FILE_PATH, "w") as f: json.dump(DEFAULT_WATCH_CONFIG, f, indent=2) return DEFAULT_WATCH_CONFIG.copy() - with open(CONFIG_FILE_PATH, 'r') as f: + with open(CONFIG_FILE_PATH, "r") as f: config = json.load(f) - + updated = False for key, value in DEFAULT_WATCH_CONFIG.items(): if key not in config: config[key] = value updated = True - + if updated: - logger.info(f"Watch configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults.") - with open(CONFIG_FILE_PATH, 'w') as f: + logger.info( + f"Watch configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults." 
+ ) + with open(CONFIG_FILE_PATH, "w") as f: json.dump(config, f, indent=2) return config except Exception as e: - logger.error(f"Error loading or creating watch config at {CONFIG_FILE_PATH}: {e}", exc_info=True) - return DEFAULT_WATCH_CONFIG.copy() # Fallback + logger.error( + f"Error loading or creating watch config at {CONFIG_FILE_PATH}: {e}", + exc_info=True, + ) + return DEFAULT_WATCH_CONFIG.copy() # Fallback + def construct_spotify_url(item_id, item_type="track"): return f"https://open.spotify.com/{item_type}/{item_id}" + def check_watched_playlists(specific_playlist_id: str = None): """Checks watched playlists for new tracks and queues downloads. If specific_playlist_id is provided, only that playlist is checked. """ - logger.info(f"Playlist Watch Manager: Starting check. Specific playlist: {specific_playlist_id or 'All'}") + logger.info( + f"Playlist Watch Manager: Starting check. Specific playlist: {specific_playlist_id or 'All'}" + ) config = get_watch_config() if specific_playlist_id: - playlist_obj = get_watched_playlist(specific_playlist_id) + playlist_obj = get_watched_playlist(specific_playlist_id) if not playlist_obj: - logger.error(f"Playlist Watch Manager: Playlist {specific_playlist_id} not found in watch database.") + logger.error( + f"Playlist Watch Manager: Playlist {specific_playlist_id} not found in watch database." + ) return watched_playlists_to_check = [playlist_obj] else: @@ -92,25 +107,36 @@ def check_watched_playlists(specific_playlist_id: str = None): return for playlist_in_db in watched_playlists_to_check: - playlist_spotify_id = playlist_in_db['spotify_id'] - playlist_name = playlist_in_db['name'] - logger.info(f"Playlist Watch Manager: Checking playlist '{playlist_name}' ({playlist_spotify_id})...") + playlist_spotify_id = playlist_in_db["spotify_id"] + playlist_name = playlist_in_db["name"] + logger.info( + f"Playlist Watch Manager: Checking playlist '{playlist_name}' ({playlist_spotify_id})..." + ) try: # For playlists, we fetch all tracks in one go usually (Spotify API limit permitting) - current_playlist_data_from_api = get_spotify_info(playlist_spotify_id, "playlist") - if not current_playlist_data_from_api or 'tracks' not in current_playlist_data_from_api: - logger.error(f"Playlist Watch Manager: Failed to fetch data or tracks from Spotify for playlist {playlist_spotify_id}.") + current_playlist_data_from_api = get_spotify_info( + playlist_spotify_id, "playlist" + ) + if ( + not current_playlist_data_from_api + or "tracks" not in current_playlist_data_from_api + ): + logger.error( + f"Playlist Watch Manager: Failed to fetch data or tracks from Spotify for playlist {playlist_spotify_id}." + ) continue - api_snapshot_id = current_playlist_data_from_api.get('snapshot_id') - api_total_tracks = current_playlist_data_from_api.get('tracks', {}).get('total', 0) - + api_snapshot_id = current_playlist_data_from_api.get("snapshot_id") + api_total_tracks = current_playlist_data_from_api.get("tracks", {}).get( + "total", 0 + ) + # Paginate through playlist tracks if necessary all_api_track_items = [] offset = 0 - limit = 50 # Spotify API limit for playlist items - + limit = 50 # Spotify API limit for playlist items + while True: # Re-fetch with pagination if tracks.next is present, or on first call. # get_spotify_info for playlist should ideally handle pagination internally if asked for all tracks. 
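The comments above lean on get_spotify_info ideally paginating internally; the hunk below instead drives pagination explicitly with an offset/limit loop that stops when the paging object's "next" field is null. A generic sketch of that pattern, with fetch_page standing in as a hypothetical substitute for get_spotify_info:

    from typing import Any, Callable, Dict, List

    def fetch_all_items(
        fetch_page: Callable[[int, int], Dict[str, Any]], limit: int = 50
    ) -> List[Dict[str, Any]]:
        """Drain an offset/limit-paginated endpoint.

        fetch_page(offset, limit) is assumed to return a dict shaped like
        Spotify's paging object: {"items": [...], "next": <url or None>}.
        """
        items: List[Dict[str, Any]] = []
        offset = 0
        while True:
            page = fetch_page(offset, limit) or {}
            batch = page.get("items") or []
            if not batch:
                break
            items.extend(batch)
            if not page.get("next"):  # a null "next" marks the last page
                break
            offset += limit  # advance by the limit actually requested
        return items
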
@@ -120,103 +146,152 @@ def check_watched_playlists(specific_playlist_id: str = None): # Modifying get_spotify_info is outside current scope, so we'll assume it returns ALL items for a playlist. # If it doesn't, this part would need adjustment for robust pagination. # For now, we use the items from the initial fetch. - - paginated_playlist_data = get_spotify_info(playlist_spotify_id, "playlist", offset=offset, limit=limit) - if not paginated_playlist_data or 'tracks' not in paginated_playlist_data: - break - - page_items = paginated_playlist_data.get('tracks', {}).get('items', []) + + paginated_playlist_data = get_spotify_info( + playlist_spotify_id, "playlist", offset=offset, limit=limit + ) + if ( + not paginated_playlist_data + or "tracks" not in paginated_playlist_data + ): + break + + page_items = paginated_playlist_data.get("tracks", {}).get("items", []) if not page_items: break all_api_track_items.extend(page_items) - - if paginated_playlist_data.get('tracks', {}).get('next'): + + if paginated_playlist_data.get("tracks", {}).get("next"): offset += limit else: break current_api_track_ids = set() api_track_id_to_item_map = {} - for item in all_api_track_items: # Use all_api_track_items - track = item.get('track') - if track and track.get('id') and not track.get('is_local'): - track_id = track['id'] + for item in all_api_track_items: # Use all_api_track_items + track = item.get("track") + if track and track.get("id") and not track.get("is_local"): + track_id = track["id"] current_api_track_ids.add(track_id) - api_track_id_to_item_map[track_id] = item - + api_track_id_to_item_map[track_id] = item + db_track_ids = get_playlist_track_ids_from_db(playlist_spotify_id) new_track_ids_for_download = current_api_track_ids - db_track_ids queued_for_download_count = 0 if new_track_ids_for_download: - logger.info(f"Playlist Watch Manager: Found {len(new_track_ids_for_download)} new tracks for playlist '{playlist_name}' to download.") + logger.info( + f"Playlist Watch Manager: Found {len(new_track_ids_for_download)} new tracks for playlist '{playlist_name}' to download." + ) for track_id in new_track_ids_for_download: api_item = api_track_id_to_item_map.get(track_id) if not api_item or not api_item.get("track"): - logger.warning(f"Playlist Watch Manager: Missing track details in API map for new track_id {track_id} in playlist {playlist_spotify_id}. Cannot queue.") + logger.warning( + f"Playlist Watch Manager: Missing track details in API map for new track_id {track_id} in playlist {playlist_spotify_id}. Cannot queue." 
+ ) continue - + track_to_queue = api_item["track"] task_payload = { "download_type": "track", "url": construct_spotify_url(track_id, "track"), - "name": track_to_queue.get('name', 'Unknown Track'), - "artist": ", ".join([a['name'] for a in track_to_queue.get('artists', []) if a.get('name')]), + "name": track_to_queue.get("name", "Unknown Track"), + "artist": ", ".join( + [ + a["name"] + for a in track_to_queue.get("artists", []) + if a.get("name") + ] + ), "orig_request": { "source": "playlist_watch", "playlist_id": playlist_spotify_id, "playlist_name": playlist_name, "track_spotify_id": track_id, - "track_item_for_db": api_item # Pass full API item for DB update on completion - } + "track_item_for_db": api_item, # Pass full API item for DB update on completion + }, # "track_details_for_db" was old name, using track_item_for_db consistent with celery_tasks } try: - task_id_or_none = download_queue_manager.add_task(task_payload, from_watch_job=True) - if task_id_or_none: # Task was newly queued - logger.info(f"Playlist Watch Manager: Queued download task {task_id_or_none} for new track {track_id} ('{track_to_queue.get('name')}') from playlist '{playlist_name}'.") + task_id_or_none = download_queue_manager.add_task( + task_payload, from_watch_job=True + ) + if task_id_or_none: # Task was newly queued + logger.info( + f"Playlist Watch Manager: Queued download task {task_id_or_none} for new track {track_id} ('{track_to_queue.get('name')}') from playlist '{playlist_name}'." + ) queued_for_download_count += 1 # If task_id_or_none is None, it was a duplicate and not re-queued, Celery manager handles logging. except Exception as e: - logger.error(f"Playlist Watch Manager: Failed to queue download for new track {track_id} from playlist '{playlist_name}': {e}", exc_info=True) - logger.info(f"Playlist Watch Manager: Attempted to queue {queued_for_download_count} new tracks for playlist '{playlist_name}'.") + logger.error( + f"Playlist Watch Manager: Failed to queue download for new track {track_id} from playlist '{playlist_name}': {e}", + exc_info=True, + ) + logger.info( + f"Playlist Watch Manager: Attempted to queue {queued_for_download_count} new tracks for playlist '{playlist_name}'." + ) else: - logger.info(f"Playlist Watch Manager: No new tracks to download for playlist '{playlist_name}'.") + logger.info( + f"Playlist Watch Manager: No new tracks to download for playlist '{playlist_name}'." + ) # Update DB for tracks that are still present in API (e.g. update 'last_seen_in_spotify') # add_tracks_to_playlist_db handles INSERT OR REPLACE, updating existing entries. # We should pass all current API tracks to ensure their `last_seen_in_spotify` and `is_present_in_spotify` are updated. - if all_api_track_items: # If there are any tracks in the API for this playlist - logger.info(f"Playlist Watch Manager: Refreshing {len(all_api_track_items)} tracks from API in local DB for playlist '{playlist_name}'.") - add_tracks_to_playlist_db(playlist_spotify_id, all_api_track_items) - + if ( + all_api_track_items + ): # If there are any tracks in the API for this playlist + logger.info( + f"Playlist Watch Manager: Refreshing {len(all_api_track_items)} tracks from API in local DB for playlist '{playlist_name}'." + ) + add_tracks_to_playlist_db(playlist_spotify_id, all_api_track_items) removed_db_ids = db_track_ids - current_api_track_ids if removed_db_ids: - logger.info(f"Playlist Watch Manager: {len(removed_db_ids)} tracks removed from Spotify playlist '{playlist_name}'. 
Marking in DB.") - mark_tracks_as_not_present_in_spotify(playlist_spotify_id, list(removed_db_ids)) + logger.info( + f"Playlist Watch Manager: {len(removed_db_ids)} tracks removed from Spotify playlist '{playlist_name}'. Marking in DB." + ) + mark_tracks_as_not_present_in_spotify( + playlist_spotify_id, list(removed_db_ids) + ) - update_playlist_snapshot(playlist_spotify_id, api_snapshot_id, api_total_tracks) # api_total_tracks from initial fetch - logger.info(f"Playlist Watch Manager: Finished checking playlist '{playlist_name}'. Snapshot ID updated. API Total Tracks: {api_total_tracks}.") + update_playlist_snapshot( + playlist_spotify_id, api_snapshot_id, api_total_tracks + ) # api_total_tracks from initial fetch + logger.info( + f"Playlist Watch Manager: Finished checking playlist '{playlist_name}'. Snapshot ID updated. API Total Tracks: {api_total_tracks}." + ) except Exception as e: - logger.error(f"Playlist Watch Manager: Error processing playlist {playlist_spotify_id}: {e}", exc_info=True) - - time.sleep(max(1, config.get("delay_between_playlists_seconds", 2))) + logger.error( + f"Playlist Watch Manager: Error processing playlist {playlist_spotify_id}: {e}", + exc_info=True, + ) + + time.sleep(max(1, config.get("delay_between_playlists_seconds", 2))) logger.info("Playlist Watch Manager: Finished checking all watched playlists.") + def check_watched_artists(specific_artist_id: str = None): """Checks watched artists for new albums and queues downloads.""" - logger.info(f"Artist Watch Manager: Starting check. Specific artist: {specific_artist_id or 'All'}") + logger.info( + f"Artist Watch Manager: Starting check. Specific artist: {specific_artist_id or 'All'}" + ) config = get_watch_config() - watched_album_groups = [g.lower() for g in config.get("watchedArtistAlbumGroup", ["album", "single"])] - logger.info(f"Artist Watch Manager: Watching for album groups: {watched_album_groups}") + watched_album_groups = [ + g.lower() for g in config.get("watchedArtistAlbumGroup", ["album", "single"]) + ] + logger.info( + f"Artist Watch Manager: Watching for album groups: {watched_album_groups}" + ) if specific_artist_id: artist_obj_in_db = get_watched_artist(specific_artist_id) if not artist_obj_in_db: - logger.error(f"Artist Watch Manager: Artist {specific_artist_id} not found in watch database.") + logger.error( + f"Artist Watch Manager: Artist {specific_artist_id} not found in watch database." + ) return artists_to_check = [artist_obj_in_db] else: @@ -227,200 +302,282 @@ def check_watched_artists(specific_artist_id: str = None): return for artist_in_db in artists_to_check: - artist_spotify_id = artist_in_db['spotify_id'] - artist_name = artist_in_db['name'] - logger.info(f"Artist Watch Manager: Checking artist '{artist_name}' ({artist_spotify_id})...") + artist_spotify_id = artist_in_db["spotify_id"] + artist_name = artist_in_db["name"] + logger.info( + f"Artist Watch Manager: Checking artist '{artist_name}' ({artist_spotify_id})..." + ) try: # Spotify API for artist albums is paginated. # We need to fetch all albums. get_spotify_info with type 'artist-albums' should handle this. # Let's assume get_spotify_info(artist_id, 'artist-albums') returns a list of all album objects. # Or we implement pagination here. 
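Once all items are fetched, both the playlist check above and the artist check below reduce to a set reconciliation against the local DB: the playlist path queues the new side and marks the stale side not-present, while the artist path only consumes the new side. A compact sketch of that diff step (IDs illustrative):

    # IDs currently reported by the Spotify API vs. IDs already stored locally.
    api_ids = {"a", "b", "c"}
    db_ids = {"b", "c", "d"}

    new_ids = api_ids - db_ids        # {"a"}: queue these for download
    stale_ids = db_ids - api_ids      # {"d"}: mark is_present_in_spotify = 0 (playlists only)
    still_present = api_ids & db_ids  # {"b", "c"}: refresh last-seen metadata
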
-
-            all_artist_albums_from_api = []
+
+            all_artist_albums_from_api: List[Dict[str, Any]] = []
            offset = 0
-            limit = 50 # Spotify API limit for artist albums
+            limit = 50  # Spotify API limit for artist albums

            while True:
                # The 'artist-albums' type for get_spotify_info needs to support pagination params.
                # And return a list of album objects.
-                logger.debug(f"Artist Watch Manager: Fetching albums for {artist_spotify_id}. Limit: {limit}, Offset: {offset}")
-                artist_albums_page = get_spotify_info(artist_spotify_id, "artist_discography", limit=limit, offset=offset)
+                logger.debug(
+                    f"Artist Watch Manager: Fetching albums for {artist_spotify_id}. Limit: {limit}, Offset: {offset}"
+                )
+                artist_albums_page = get_spotify_info(
+                    artist_spotify_id, "artist_discography", limit=limit, offset=offset
+                )

-                if not artist_albums_page or not isinstance(artist_albums_page.get('items'), list):
-                    logger.warning(f"Artist Watch Manager: No album items found or invalid format for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Response: {artist_albums_page}")
+                if not artist_albums_page or not isinstance(
+                    artist_albums_page.get("items"), list
+                ):
+                    logger.warning(
+                        f"Artist Watch Manager: No album items found or invalid format for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Response: {artist_albums_page}"
+                    )
                    break
-
-                current_page_albums = artist_albums_page.get('items', [])
+
+                current_page_albums = artist_albums_page.get("items", [])
                if not current_page_albums:
-                    logger.info(f"Artist Watch Manager: No more albums on page for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Total fetched so far: {len(all_artist_albums_from_api)}.")
+                    logger.info(
+                        f"Artist Watch Manager: No more albums on page for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Total fetched so far: {len(all_artist_albums_from_api)}."
+                    )
                    break
-
-                logger.debug(f"Artist Watch Manager: Fetched {len(current_page_albums)} albums on current page for artist '{artist_name}'.")
+
+                logger.debug(
+                    f"Artist Watch Manager: Fetched {len(current_page_albums)} albums on current page for artist '{artist_name}'."
+                )
                all_artist_albums_from_api.extend(current_page_albums)

                # Correct pagination: Check if Spotify indicates a next page URL
                # The `next` field in Spotify API responses is a URL to the next page or null.
-                if artist_albums_page.get('next'):
-                    offset += limit # CORRECT: Increment offset by the limit used for the request
+                if artist_albums_page.get("next"):
+                    offset += limit  # CORRECT: Increment offset by the limit used for the request
                else:
-                    logger.info(f"Artist Watch Manager: No 'next' page URL for artist '{artist_name}'. Pagination complete. Total albums fetched: {len(all_artist_albums_from_api)}.")
+                    logger.info(
+                        f"Artist Watch Manager: No 'next' page URL for artist '{artist_name}'. Pagination complete. Total albums fetched: {len(all_artist_albums_from_api)}."
+                    )
                    break
-
-            # total_albums_from_api = len(all_artist_albums_from_api)
+
+            # total_albums_from_api = len(all_artist_albums_from_api)
            # Use the 'total' field from the API response for a more accurate count of all available albums (matching current API filter if any)
-            api_reported_total_albums = artist_albums_page.get('total', 0) if 'artist_albums_page' in locals() and artist_albums_page else len(all_artist_albums_from_api)
-            logger.info(f"Artist Watch Manager: Fetched {len(all_artist_albums_from_api)} albums in total from API for artist '{artist_name}'. API reports total: {api_reported_total_albums}.")
+            api_reported_total_albums = (
+                artist_albums_page.get("total", 0)
+                if "artist_albums_page" in locals() and artist_albums_page
+                else len(all_artist_albums_from_api)
+            )
+            logger.info(
+                f"Artist Watch Manager: Fetched {len(all_artist_albums_from_api)} albums in total from API for artist '{artist_name}'. API reports total: {api_reported_total_albums}."
+            )

            db_album_ids = get_artist_album_ids_from_db(artist_spotify_id)
-            logger.info(f"Artist Watch Manager: Found {len(db_album_ids)} albums in DB for artist '{artist_name}'. These will be skipped if re-encountered unless logic changes.")
+            logger.info(
+                f"Artist Watch Manager: Found {len(db_album_ids)} albums in DB for artist '{artist_name}'. These will be skipped if re-encountered unless logic changes."
+            )

            queued_for_download_count = 0
-            processed_album_ids_in_run = set() # To avoid processing duplicate album_ids if API returns them across pages (should not happen with correct pagination)
+            processed_album_ids_in_run = set()  # To avoid processing duplicate album_ids if API returns them across pages (should not happen with correct pagination)

            for album_data in all_artist_albums_from_api:
-                album_id = album_data.get('id')
-                album_name = album_data.get('name', 'Unknown Album')
-                album_group = album_data.get('album_group', 'N/A').lower()
-                album_type = album_data.get('album_type', 'N/A').lower()
+                album_id = album_data.get("id")
+                album_name = album_data.get("name", "Unknown Album")
+                album_group = album_data.get("album_group", "N/A").lower()
+                album_type = album_data.get("album_type", "N/A").lower()

                if not album_id:
-                    logger.warning(f"Artist Watch Manager: Skipping album without ID for artist '{artist_name}'. Album data: {album_data}")
+                    logger.warning(
+                        f"Artist Watch Manager: Skipping album without ID for artist '{artist_name}'. Album data: {album_data}"
+                    )
                    continue
-
+
                if album_id in processed_album_ids_in_run:
-                    logger.debug(f"Artist Watch Manager: Album '{album_name}' ({album_id}) already processed in this run. Skipping.")
+                    logger.debug(
+                        f"Artist Watch Manager: Album '{album_name}' ({album_id}) already processed in this run. Skipping."
+                    )
                    continue
                processed_album_ids_in_run.add(album_id)

                # Filter based on watchedArtistAlbumGroup
                # The album_group field is generally preferred for this type of categorization as per Spotify docs.
                is_matching_group = album_group in watched_album_groups
-
-                logger.debug(f"Artist '{artist_name}', Album '{album_name}' ({album_id}): album_group='{album_group}', album_type='{album_type}'. Watched groups: {watched_album_groups}. Match: {is_matching_group}.")
+
+                logger.debug(
+                    f"Artist '{artist_name}', Album '{album_name}' ({album_id}): album_group='{album_group}', album_type='{album_type}'. Watched groups: {watched_album_groups}. Match: {is_matching_group}."
+                )

                if not is_matching_group:
-                    logger.debug(f"Artist Watch Manager: Skipping album '{album_name}' ({album_id}) by '{artist_name}' - group '{album_group}' not in watched list: {watched_album_groups}.")
+                    logger.debug(
+                        f"Artist Watch Manager: Skipping album '{album_name}' ({album_id}) by '{artist_name}' - group '{album_group}' not in watched list: {watched_album_groups}."
+                    )
                    continue
-
+
+                logger.info(
+                    f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' (group: {album_group}) IS a matching group."
+                )

                if album_id not in db_album_ids:
-                    logger.info(f"Artist Watch Manager: Found NEW matching album '{album_name}' ({album_id}) by '{artist_name}'. Queuing for download.")
-
-                    album_artists_list = album_data.get('artists', [])
-                    album_main_artist_name = album_artists_list[0].get('name', 'Unknown Artist') if album_artists_list else 'Unknown Artist'
+                    logger.info(
+                        f"Artist Watch Manager: Found NEW matching album '{album_name}' ({album_id}) by '{artist_name}'. Queuing for download."
+                    )
+
+                    album_artists_list = album_data.get("artists", [])
+                    album_main_artist_name = (
+                        album_artists_list[0].get("name", "Unknown Artist")
+                        if album_artists_list
+                        else "Unknown Artist"
+                    )

                    task_payload = {
-                        "download_type": "album", # Or "track" if downloading individual tracks of album later
+                        "download_type": "album",  # Or "track" if downloading individual tracks of album later
                        "url": construct_spotify_url(album_id, "album"),
                        "name": album_name,
-                        "artist": album_main_artist_name, # Primary artist of the album
+                        "artist": album_main_artist_name,  # Primary artist of the album
                        "orig_request": {
                            "source": "artist_watch",
-                            "artist_spotify_id": artist_spotify_id, # Watched artist
+                            "artist_spotify_id": artist_spotify_id,  # Watched artist
                            "artist_name": artist_name,
                            "album_spotify_id": album_id,
-                            "album_data_for_db": album_data # Pass full API album object for DB update on completion/queuing
-                        }
+                            "album_data_for_db": album_data,  # Pass full API album object for DB update on completion/queuing
+                        },
                    }
                    try:
                        # Add to DB first with task_id, then queue. Or queue and add task_id to DB.
                        # Let's use add_or_update_album_for_artist to record it with a task_id before queuing.
                        # The celery_queue_manager.add_task might return None if it's a duplicate.
-
+
                        # Record the album in DB as being processed for download
                        # Task_id will be added if successfully queued
-
+
                        # We should call add_task first, and if it returns a task_id (not a duplicate), then update our DB.
-                        task_id_or_none = download_queue_manager.add_task(task_payload, from_watch_job=True)
-
-                        if task_id_or_none: # Task was newly queued
+                        task_id_or_none = download_queue_manager.add_task(
+                            task_payload, from_watch_job=True
+                        )
+
+                        if task_id_or_none:  # Task was newly queued
                            # REMOVED: add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=task_id_or_none, is_download_complete=False)
                            # The album will be added/updated in the DB by celery_tasks.py upon successful download completion.
-                            logger.info(f"Artist Watch Manager: Queued download task {task_id_or_none} for new album '{album_name}' from artist '{artist_name}'. DB entry will be created/updated on success.")
+                            logger.info(
+                                f"Artist Watch Manager: Queued download task {task_id_or_none} for new album '{album_name}' from artist '{artist_name}'. DB entry will be created/updated on success."
+                            )
                            queued_for_download_count += 1
                        # If task_id_or_none is None, it was a duplicate. Celery manager handles logging.
                    except Exception as e:
-                        logger.error(f"Artist Watch Manager: Failed to queue download for new album {album_id} ('{album_name}') from artist '{artist_name}': {e}", exc_info=True)
+                        logger.error(
+                            f"Artist Watch Manager: Failed to queue download for new album {album_id} ('{album_name}') from artist '{artist_name}': {e}",
+                            exc_info=True,
+                        )
                else:
-                    logger.info(f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' already known in DB (ID found in db_album_ids). Skipping queue.")
+                    logger.info(
+                        f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' already known in DB (ID found in db_album_ids). Skipping queue."
+                    )
                    # Optionally, update its entry (e.g. last_seen, or if details changed), but for now, we only queue new ones.
                    # add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=None, is_download_complete=False) # would update added_to_db_at

-            logger.info(f"Artist Watch Manager: For artist '{artist_name}', processed {len(all_artist_albums_from_api)} API albums, attempted to queue {queued_for_download_count} new albums.")
-
-            update_artist_metadata_after_check(artist_spotify_id, api_reported_total_albums)
-            logger.info(f"Artist Watch Manager: Finished checking artist '{artist_name}'. DB metadata updated. API reported total albums (for API filter): {api_reported_total_albums}.")
+            logger.info(
+                f"Artist Watch Manager: For artist '{artist_name}', processed {len(all_artist_albums_from_api)} API albums, attempted to queue {queued_for_download_count} new albums."
+            )
+
+            update_artist_metadata_after_check(
+                artist_spotify_id, api_reported_total_albums
+            )
+            logger.info(
+                f"Artist Watch Manager: Finished checking artist '{artist_name}'. DB metadata updated. API reported total albums (for API filter): {api_reported_total_albums}."
+            )

        except Exception as e:
-            logger.error(f"Artist Watch Manager: Error processing artist {artist_spotify_id} ('{artist_name}'): {e}", exc_info=True)
-
+            logger.error(
+                f"Artist Watch Manager: Error processing artist {artist_spotify_id} ('{artist_name}'): {e}",
+                exc_info=True,
+            )
+
        time.sleep(max(1, config.get("delay_between_artists_seconds", 5)))
    logger.info("Artist Watch Manager: Finished checking all watched artists.")

+
def playlist_watch_scheduler():
    """Periodically calls check_watched_playlists and check_watched_artists."""
    logger.info("Watch Scheduler: Thread started.")
-    config = get_watch_config() # Load config once at start, or reload each loop? Reload each loop for dynamic changes.
-
+
    while not STOP_EVENT.is_set():
-        current_config = get_watch_config() # Get latest config for this run
+        current_config = get_watch_config()  # Get latest config for this run
        interval = current_config.get("watchPollIntervalSeconds", 3600)
-        watch_enabled = current_config.get("enabled", False) # Get enabled status
+        watch_enabled = current_config.get("enabled", False)  # Get enabled status

        if not watch_enabled:
-            logger.info("Watch Scheduler: Watch feature is disabled in config. Skipping checks.")
-            STOP_EVENT.wait(interval) # Still respect poll interval for checking config again
-            continue # Skip to next iteration
-
+            logger.info(
+                "Watch Scheduler: Watch feature is disabled in config. Skipping checks."
+ ) + STOP_EVENT.wait( + interval + ) # Still respect poll interval for checking config again + continue # Skip to next iteration + try: logger.info("Watch Scheduler: Starting playlist check run.") check_watched_playlists() logger.info("Watch Scheduler: Playlist check run completed.") except Exception as e: - logger.error(f"Watch Scheduler: Unhandled exception during check_watched_playlists: {e}", exc_info=True) - + logger.error( + f"Watch Scheduler: Unhandled exception during check_watched_playlists: {e}", + exc_info=True, + ) + # Add a small delay between playlist and artist checks if desired # time.sleep(current_config.get("delay_between_check_types_seconds", 10)) - if STOP_EVENT.is_set(): break # Check stop event again before starting artist check + if STOP_EVENT.is_set(): + break # Check stop event again before starting artist check try: logger.info("Watch Scheduler: Starting artist check run.") check_watched_artists() logger.info("Watch Scheduler: Artist check run completed.") except Exception as e: - logger.error(f"Watch Scheduler: Unhandled exception during check_watched_artists: {e}", exc_info=True) - - logger.info(f"Watch Scheduler: All checks complete. Next run in {interval} seconds.") - STOP_EVENT.wait(interval) + logger.error( + f"Watch Scheduler: Unhandled exception during check_watched_artists: {e}", + exc_info=True, + ) + + logger.info( + f"Watch Scheduler: All checks complete. Next run in {interval} seconds." + ) + STOP_EVENT.wait(interval) logger.info("Watch Scheduler: Thread stopped.") -# --- Global thread for the scheduler --- -_watch_scheduler_thread = None # Renamed from _playlist_watch_thread -def start_watch_manager(): # Renamed from start_playlist_watch_manager +# --- Global thread for the scheduler --- +_watch_scheduler_thread = None # Renamed from _playlist_watch_thread + + +def start_watch_manager(): # Renamed from start_playlist_watch_manager global _watch_scheduler_thread if _watch_scheduler_thread is None or not _watch_scheduler_thread.is_alive(): STOP_EVENT.clear() # Initialize DBs on start - from routes.utils.watch.db import init_playlists_db, init_artists_db # Updated import - init_playlists_db() # For playlists - init_artists_db() # For artists - - _watch_scheduler_thread = threading.Thread(target=playlist_watch_scheduler, daemon=True) + from routes.utils.watch.db import ( + init_playlists_db, + init_artists_db, + ) # Updated import + + init_playlists_db() # For playlists + init_artists_db() # For artists + + _watch_scheduler_thread = threading.Thread( + target=playlist_watch_scheduler, daemon=True + ) _watch_scheduler_thread.start() - logger.info("Watch Manager: Background scheduler started (includes playlists and artists).") + logger.info( + "Watch Manager: Background scheduler started (includes playlists and artists)." 
+ ) else: logger.info("Watch Manager: Background scheduler already running.") -def stop_watch_manager(): # Renamed from stop_playlist_watch_manager + +def stop_watch_manager(): # Renamed from stop_playlist_watch_manager global _watch_scheduler_thread if _watch_scheduler_thread and _watch_scheduler_thread.is_alive(): logger.info("Watch Manager: Stopping background scheduler...") - STOP_EVENT.set() - _watch_scheduler_thread.join(timeout=10) + STOP_EVENT.set() + _watch_scheduler_thread.join(timeout=10) if _watch_scheduler_thread.is_alive(): logger.warning("Watch Manager: Scheduler thread did not stop in time.") else: @@ -429,5 +586,6 @@ def stop_watch_manager(): # Renamed from stop_playlist_watch_manager else: logger.info("Watch Manager: Background scheduler not running.") + # If this module is imported, and you want to auto-start the manager, you could call start_watch_manager() here. # However, it's usually better to explicitly start it from the main application/__init__.py. diff --git a/src/js/album.ts b/src/js/album.ts index 3345c8d..ddad930 100644 --- a/src/js/album.ts +++ b/src/js/album.ts @@ -130,13 +130,13 @@ function renderAlbum(album: Album) {

The explicit content filter is controlled by environment variables.

`; - + const contentContainer = document.getElementById('album-header'); if (contentContainer) { contentContainer.innerHTML = placeholderContent; contentContainer.classList.remove('hidden'); } - + return; // Stop rendering the actual album content } @@ -216,7 +216,7 @@ function renderAlbum(album: Album) { const albumHeader = document.getElementById('album-header'); if (albumHeader) albumHeader.appendChild(downloadAlbumBtn); // Null check } - + if (downloadAlbumBtn) { // Null check for downloadAlbumBtn if (isExplicitFilterEnabled && hasExplicitTrack) { // Disable the album download button and display a message explaining why @@ -256,7 +256,7 @@ function renderAlbum(album: Album) { if (album.tracks?.items) { album.tracks.items.forEach((track, index) => { if (!track) return; // Skip null or undefined tracks - + // Skip explicit tracks if filter is enabled if (isExplicitFilterEnabled && track.explicit) { // Add a placeholder for filtered explicit tracks @@ -273,7 +273,7 @@ function renderAlbum(album: Album) { tracksList.appendChild(trackElement); return; } - + const trackElement = document.createElement('div'); trackElement.className = 'track'; trackElement.innerHTML = ` @@ -283,13 +283,13 @@ function renderAlbum(album: Album) { ${track.name || 'Unknown Track'}
- ${track.artists?.map(a => + ${track.artists?.map(a => `${a?.name || 'Unknown Artist'}` ).join(', ') || 'Unknown Artist'}
${msToTime(track.duration_ms || 0)}
-    `;
-
+
      list.appendChild(credItem);
    });
@@ -556,13 +556,13 @@ async function handleEditCredential(e: MouseEvent) {
    (document.querySelector(`[data-service="${service}"]`) as HTMLElement | null)?.click();
    await new Promise(resolve => setTimeout(resolve, 50));
-    setFormVisibility(true);
+    setFormVisibility(true);

    const response = await fetch(`/api/credentials/${service}/${name}`);
    if (!response.ok) {
      throw new Error(`Failed to load credential: ${response.statusText}`);
    }
-
+
    const data = await response.json(); // data = {name, region, blob_content/arl}

    currentCredential = name ? name : null; // Set the global currentCredential to the one being edited
@@ -591,7 +591,7 @@ async function handleEditCredential(e: MouseEvent) {
    (document.getElementById('formTitle') as HTMLElement | null)!.textContent = `Edit ${service!.charAt(0).toUpperCase() + service!.slice(1)} Account`;
    (document.getElementById('submitCredentialBtn') as HTMLElement | null)!.textContent = 'Update Account';
-
+
    toggleSearchFieldsVisibility(false); // Ensure old per-account search fields are hidden
  } catch (error: any) {
    showConfigError(error.message);
@@ -618,11 +618,11 @@ async function handleEditSearchCredential(e: Event) {
function toggleSearchFieldsVisibility(showSearchFields: boolean) {
  const serviceFieldsDiv = document.getElementById('serviceFields') as HTMLElement | null;
  const searchFieldsDiv = document.getElementById('searchFields') as HTMLElement | null; // This div might be removed from HTML if not used by other services
-
+
  // Simplified: Always show serviceFields, always hide (old) searchFields in this form context.
  // The new global Spotify API fields are in a separate card and handled by different functions.
  if(serviceFieldsDiv) serviceFieldsDiv.style.display = 'block';
-  if(searchFieldsDiv) searchFieldsDiv.style.display = 'none';
+  if(searchFieldsDiv) searchFieldsDiv.style.display = 'none';

  // Ensure required attributes are set correctly for visible service fields
  if (serviceConfig[currentService] && serviceConfig[currentService].fields) {
@@ -644,31 +644,31 @@ function toggleSearchFieldsVisibility(showSearchFields: boolean) {

function updateFormFields() {
  const serviceFieldsDiv = document.getElementById('serviceFields') as HTMLElement | null;
-
+
  if(serviceFieldsDiv) serviceFieldsDiv.innerHTML = '';

  if (serviceConfig[currentService] && serviceConfig[currentService].fields) {
    serviceConfig[currentService].fields.forEach((field: { id: string; label: string; type: string; placeholder?: string; rows?: number; }) => {
      const fieldDiv = document.createElement('div');
      fieldDiv.className = 'form-group';
-
+
      let inputElementHTML = '';
      if (field.type === 'textarea') {
-        inputElementHTML = ``;
      } else {
-        inputElementHTML = ``;
      }
      // Region field is optional, so remove 'required' if id is 'accountRegion'
@@ -686,9 +686,9 @@ function updateFormFields() {
  (document.getElementById('formTitle') as HTMLElement | null)!.textContent = `Add New ${currentService.charAt(0).toUpperCase() + currentService.slice(1)} Account`;
  (document.getElementById('submitCredentialBtn') as HTMLElement | null)!.textContent = 'Save Account';
-
-  toggleSearchFieldsVisibility(false);
-  isEditingSearch = false;
+
+  toggleSearchFieldsVisibility(false);
+  isEditingSearch = false;

  // Show/hide region hints based on current service
  if (spotifyRegionHint && deezerRegionHint) {
@@ -716,7 +716,7 @@ function populateFormFields(service: string, data: Record) {

async function handleCredentialSubmit(e: Event) {
  e.preventDefault();
  const service = (document.querySelector('.tab-button.active') as HTMLElement | null)?.dataset.service;
-
+
  // Get the account name from the 'accountName' field within the dynamically generated serviceFields
  const accountNameInput = document.getElementById('accountName') as HTMLInputElement | null;
  const accountNameValue = accountNameInput?.value.trim();
@@ -745,7 +745,7 @@ async function handleCredentialSubmit(e: Event) {
  const formData: Record = {};
  let isValid = true;
  let firstInvalidField: HTMLInputElement | HTMLTextAreaElement | null = null;
-
+
  const currentServiceFields = serviceConfig[service!]?.fields as Array<{id: string, label: string, type: string}> | undefined;

  if (currentServiceFields) {
@@ -753,7 +753,7 @@ async function handleCredentialSubmit(e: Event) {
      const input = document.getElementById(field.id) as HTMLInputElement | HTMLTextAreaElement | null;
      const value = input ? input.value.trim() : '';
      formData[field.id] = value;
-
+
      const isRequired = input?.hasAttribute('required');
      if (isRequired && !value) {
        isValid = false;
@@ -763,7 +763,7 @@ async function handleCredentialSubmit(e: Event) {
  } else {
    throw new Error(`No fields configured for service: ${service}`);
  }
-
+
  if (!isValid) {
    if (firstInvalidField) {
      const nonNullInvalidField = firstInvalidField as HTMLInputElement | HTMLTextAreaElement;
@@ -776,8 +776,8 @@ async function handleCredentialSubmit(e: Event) {
  }

  // The validator in serviceConfig now expects fields like 'accountName', 'accountRegion', etc.
-  data = serviceConfig[service!].validator(formData);
-
+  data = serviceConfig[service!].validator(formData);
+
  // If it's a new credential and the validator didn't explicitly set 'name' from 'accountName',
  // (though it should: serviceConfig.spotify.validator expects data.accountName and sets 'name')
  // we ensure the 'name' in the payload matches accountNameValue if it's a new POST.
@@ -800,13 +800,13 @@ async function handleCredentialSubmit(e: Event) {
  }

  await updateAccountSelectors();
-  loadCredentials(service!);
-
+  loadCredentials(service!);
+
  showConfigSuccess('Account saved successfully');
-
+
  setTimeout(() => {
-    setFormVisibility(false);
-  }, 2000);
+    setFormVisibility(false);
+  }, 2000);
} catch (error: any) {
  showConfigError(error.message);
}
@@ -823,17 +823,17 @@ function resetForm() {
  if (accountNameInput) {
    accountNameInput.disabled = false;
  }
-
+
  const convertToSelect = document.getElementById('convertToSelect') as HTMLSelectElement | null;
  if (convertToSelect) {
-    convertToSelect.value = '';
-    updateBitrateOptions('');
+    convertToSelect.value = '';
+    updateBitrateOptions('');
  }

  const serviceName = currentService.charAt(0).toUpperCase() + currentService.slice(1);
  (document.getElementById('formTitle') as HTMLElement | null)!.textContent = `Add New ${serviceName} Account`;
  (document.getElementById('submitCredentialBtn') as HTMLElement | null)!.textContent = 'Save Account';
-
+
  toggleSearchFieldsVisibility(false);
}
@@ -876,7 +876,7 @@ async function saveConfig() {
  // Set default service selection
  const defaultServiceSelect = document.getElementById('defaultServiceSelect') as HTMLSelectElement | null;
  if (defaultServiceSelect) defaultServiceSelect.value = savedConfig.service || 'spotify';
-
+
  // Update the service-specific options based on selected service
  updateServiceSpecificOptions();
@@ -916,7 +916,7 @@ async function saveConfig() {
  if (tracknumPaddingToggle) tracknumPaddingToggle.checked = savedConfig.tracknum_padding === undefined ? true : !!savedConfig.tracknum_padding;
  const saveCoverToggle = document.getElementById('saveCoverToggle') as HTMLInputElement | null;
  if (saveCoverToggle) saveCoverToggle.checked = savedConfig.save_cover === undefined ? true : !!savedConfig.save_cover;
-
+
  // Load conversion settings after save
  const convertToSelect = document.getElementById('convertToSelect') as HTMLSelectElement | null;
  if (convertToSelect) {
@@ -930,7 +930,7 @@ async function saveConfig() {
    }
  } else if (bitrateSelect) {
    if (convertToSelect && !CONVERSION_FORMATS[convertToSelect.value]?.length) {
-      bitrateSelect.value = '';
+      bitrateSelect.value = '';
    }
  }
@@ -949,7 +949,7 @@ function updateExplicitFilterStatus(isEnabled: boolean) {
  if (statusElement) {
    // Remove existing classes
    statusElement.classList.remove('enabled', 'disabled');
-
+
    // Add appropriate class and text based on whether filter is enabled
    if (isEnabled) {
      statusElement.textContent = 'Enabled';
@@ -976,15 +976,15 @@ function showConfigSuccess(message: string) {

// Function to copy the selected placeholder to clipboard
function copyPlaceholderToClipboard(select: HTMLSelectElement) {
  const placeholder = select.value;
-
+
  if (!placeholder) return; // If nothing selected
-
+
  // Copy to clipboard
  navigator.clipboard.writeText(placeholder)
    .then(() => {
      // Show success notification
      showCopyNotification(`Copied ${placeholder} to clipboard`);
-
+
      // Reset select to default after a short delay
      setTimeout(() => {
        select.selectedIndex = 0;
@@ -1004,20 +1004,20 @@ function showCopyNotification(message: string) {
    notificationContainer.id = 'copyNotificationContainer';
    document.body.appendChild(notificationContainer);
  }
-
+
  // Create notification element
  const notification = document.createElement('div');
  notification.className = 'copy-notification';
  notification.textContent = message;
-
+
  // Add to container
  notificationContainer.appendChild(notification);
-
+
  // Trigger animation
  setTimeout(() => {
    notification.classList.add('show');
  }, 10);
-
+
  // Remove after animation completes
  setTimeout(() => {
    notification.classList.remove('show');
diff --git a/src/js/history.ts b/src/js/history.ts
index 7816feb..0ec9984 100644
--- a/src/js/history.ts
+++ b/src/js/history.ts
@@ -77,14 +77,14 @@ document.addEventListener('DOMContentLoaded', () => {
      qualityDisplay = `${entry.bitrate}k (${entry.quality_profile || 'Profile'})`;
    }
    row.insertCell().textContent = qualityDisplay;
-
+
    const statusCell = row.insertCell();
    statusCell.textContent = entry.status_final || 'N/A';
    statusCell.className = `status-${entry.status_final}`;
    row.insertCell().textContent = entry.timestamp_added ? new Date(entry.timestamp_added * 1000).toLocaleString() : 'N/A';
    row.insertCell().textContent = entry.timestamp_completed ? new Date(entry.timestamp_completed * 1000).toLocaleString() : 'N/A';
-
+
    const detailsCell = row.insertCell();
    const detailsButton = document.createElement('button');
    detailsButton.innerHTML = `Details`;
@@ -185,4 +185,4 @@ document.addEventListener('DOMContentLoaded', () => {

  // Initial fetch
  fetchHistory();
-}); \ No newline at end of file
+}); \ No newline at end of file
diff --git a/src/js/main.ts b/src/js/main.ts
index 6d31dc1..1b2ec0f 100644
--- a/src/js/main.ts
+++ b/src/js/main.ts
@@ -206,23 +206,23 @@ document.addEventListener('DOMContentLoaded', function() {
    try {
      const url = `/api/search?q=${encodeURIComponent(currentQuery)}&search_type=${currentSearchType}&limit=40`;
      const response = await fetch(url);
-
+
      if (!response.ok) {
        throw new Error('Network response was not ok');
      }
-
+
      const data = await response.json() as SearchResponse; // Assert type for API response
-
+
      // Hide loading indicator
      showLoading(false);
-
+
      // Render results
      if (data && data.items && data.items.length > 0) {
        if(resultsContainer) resultsContainer.innerHTML = '';
-
+
        // Filter out items with null/undefined essential display parameters
        const validItems = filterValidItems(data.items, currentSearchType);
-
+
        if (validItems.length === 0) {
          // No valid items found after filtering
          if(resultsContainer) resultsContainer.innerHTML = `
@@ -232,19 +232,19 @@ document.addEventListener('DOMContentLoaded', function() {
          `;
          return;
        }
-
+
        validItems.forEach((item, index) => {
          const cardElement = createResultCard(item, currentSearchType, index);
-
+
          // Store the item data directly on the button element
          const downloadBtn = cardElement.querySelector('.download-btn') as HTMLButtonElement | null;
          if (downloadBtn) {
            downloadBtn.dataset.itemIndex = index.toString();
          }
-
+
          if(resultsContainer) resultsContainer.appendChild(cardElement);
        });
-
+
        // Attach download handlers to the newly created cards
        attachDownloadListeners(validItems);
      } else {
@@ -271,63 +271,63 @@ document.addEventListener('DOMContentLoaded', function() {
  */
  function filterValidItems(items: SearchResultItem[], type: string): SearchResultItem[] {
    if (!items) return [];
-
+
    return items.filter(item => {
      // Skip null/undefined items
      if (!item) return false;
-
+
      // Skip explicit content if filter is enabled
      if (downloadQueue.isExplicitFilterEnabled() && ('explicit' in item && item.explicit === true)) {
        return false;
      }
-
+
      // Check essential parameters based on search type
      switch (type) {
        case 'track':
          const trackItem = item as TrackResultItem;
          return (
            trackItem.name &&
-            trackItem.artists && 
+            trackItem.artists &&
            trackItem.artists.length > 0 &&
-            trackItem.artists[0] && 
+            trackItem.artists[0] &&
            trackItem.artists[0].name &&
-            trackItem.album && 
+            trackItem.album &&
            trackItem.album.name &&
-            trackItem.external_urls && 
+            trackItem.external_urls &&
            trackItem.external_urls.spotify
          );
-
+
        case 'album':
          const albumItem = item as AlbumResultItem;
          return (
            albumItem.name &&
-            albumItem.artists && 
+            albumItem.artists &&
            albumItem.artists.length > 0 &&
-            albumItem.artists[0] && 
+            albumItem.artists[0] &&
            albumItem.artists[0].name &&
-            albumItem.external_urls && 
+            albumItem.external_urls &&
            albumItem.external_urls.spotify
          );
-
+
        case 'playlist':
          const playlistItem = item as PlaylistResultItem;
          return (
            playlistItem.name &&
-            playlistItem.owner && 
+            playlistItem.owner &&
            playlistItem.owner.display_name &&
            playlistItem.tracks &&
-            playlistItem.external_urls && 
+            playlistItem.external_urls &&
            playlistItem.external_urls.spotify
          );
-
+
        case 'artist':
          const artistItem = item as ArtistResultItem;
          return (
            artistItem.name &&
-            artistItem.external_urls && 
+            artistItem.external_urls &&
            artistItem.external_urls.spotify
          );
-
+
        default:
          // Default case - just check if the item exists (already handled by `if (!item) return false;`)
          return true;
@@ -343,69 +343,69 @@ document.addEventListener('DOMContentLoaded', function() {
      const btn = btnElm as HTMLButtonElement;
      btn.addEventListener('click', (e: Event) => {
        e.stopPropagation();
-
+
        // Get the item index from the button's dataset
        const itemIndexStr = btn.dataset.itemIndex;
        if (!itemIndexStr) return;
        const itemIndex = parseInt(itemIndexStr, 10);
-
+
        // Get the corresponding item
        const item = items[itemIndex];
        if (!item) return;
-
+
        const currentSearchType = searchType?.value || 'track';
        let itemId = item.id || ''; // Use item.id directly
-
+
        if (!itemId) { // Check if ID was found
          showError('Could not determine download ID');
          return;
        }
-
+
        // Prepare metadata for the download
        let metadata: DownloadQueueItem;
        if (currentSearchType === 'track') {
          const trackItem = item as TrackResultItem;
-          metadata = { 
+          metadata = {
            name: trackItem.name || 'Unknown',
            artist: trackItem.artists ? trackItem.artists[0]?.name : undefined,
            album: trackItem.album ? { name: trackItem.album.name, album_type: trackItem.album.album_type } : undefined
          };
        } else if (currentSearchType === 'album') {
          const albumItem = item as AlbumResultItem;
-          metadata = { 
+          metadata = {
            name: albumItem.name || 'Unknown',
            artist: albumItem.artists ? albumItem.artists[0]?.name : undefined,
            album: { name: albumItem.name, album_type: albumItem.album_type}
          };
        } else if (currentSearchType === 'playlist') {
          const playlistItem = item as PlaylistResultItem;
-          metadata = { 
+          metadata = {
            name: playlistItem.name || 'Unknown',
            // artist for playlist is owner
            artist: playlistItem.owner?.display_name
          };
        } else if (currentSearchType === 'artist') {
          const artistItem = item as ArtistResultItem;
-          metadata = { 
+          metadata = {
            name: artistItem.name || 'Unknown',
            artist: artistItem.name // For artist type, artist is the item name itself
          };
        } else {
          metadata = { name: item.name || 'Unknown' }; // Fallback
        }
-
+
        // Disable the button and update text
        btn.disabled = true;
-
+
        // For artist downloads, show a different message since it will queue multiple albums
        if (currentSearchType === 'artist') {
          btn.innerHTML = 'Queueing albums...';
        } else {
          btn.innerHTML = 'Queueing...';
        }
-
+
        // Start the download
-        startDownload(itemId, currentSearchType, metadata, 
+        startDownload(itemId, currentSearchType, metadata,
          (item as AlbumResultItem).album_type || ((item as TrackResultItem).album ? (item as TrackResultItem).album.album_type : null))
          .then(() => {
            // For artists, show how many albums were queued
@@ -434,11 +434,11 @@ document.addEventListener('DOMContentLoaded', function() {
      showError('Missing ID or type for download');
      return;
    }
-
+
    try {
      // Use the centralized downloadQueue.download method
      await downloadQueue.download(itemId, type, item, albumType);
-
+
      // Make the queue visible after queueing
      downloadQueue.toggleVisibility(true);
    } catch (error: any) {
@@ -455,11 +455,11 @@ document.addEventListener('DOMContentLoaded', function() {
    errorDiv.className = 'error';
    errorDiv.textContent = message;
    document.body.appendChild(errorDiv);
-
+
    // Auto-remove after 5 seconds
    setTimeout(() => errorDiv.remove(), 5000);
  }
-
+
  /**
   * Shows a success message
   */
@@ -468,7 +468,7 @@ document.addEventListener('DOMContentLoaded', function() {
    successDiv.className = 'success';
    successDiv.textContent = message;
    document.body.appendChild(successDiv);
-
+
    // Auto-remove after 5 seconds
    setTimeout(() => successDiv.remove(), 5000);
  }
@@ -477,7 +477,7 @@ document.addEventListener('DOMContentLoaded', function() {
   * Checks if a string is a valid Spotify URL
   */
  function isSpotifyUrl(url: string): boolean {
-    return url.includes('open.spotify.com') || 
+    return url.includes('open.spotify.com') ||
           url.includes('spotify:') ||
           url.includes('link.tospotify.com');
  }
@@ -489,7 +489,7 @@ document.addEventListener('DOMContentLoaded', function() {
    // Allow optional path segments (e.g. intl-fr) before resource type
    const regex = /spotify\.com\/(?:[^\/]+\/)??(track|album|playlist|artist)\/([a-zA-Z0-9]+)/i;
    const match = url.match(regex);
-
+
    if (match) {
      return {
        type: match[1],
@@ -504,7 +504,7 @@ document.addEventListener('DOMContentLoaded', function() {
   */
  function msToMinutesSeconds(ms: number | undefined): string {
    if (!ms) return '0:00';
-
+
    const minutes = Math.floor(ms / 60000);
    const seconds = ((ms % 60000) / 1000).toFixed(0);
    return `${minutes}:${seconds.padStart(2, '0')}`;
@@ -516,10 +516,10 @@ document.addEventListener('DOMContentLoaded', function() {
  function createResultCard(item: SearchResultItem, type: string, index: number): HTMLDivElement {
    const cardElement = document.createElement('div');
    cardElement.className = 'result-card';
-
+
    // Set cursor to pointer for clickable cards
    cardElement.style.cursor = 'pointer';
-
+
    // Get the appropriate image URL
    let imageUrl = '/static/images/placeholder.jpg';
    // Type guards to safely access images
@@ -539,11 +539,11 @@ document.addEventListener('DOMContentLoaded', function() {
        imageUrl = playlistItem.images[0].url;
      }
    }
-
+
    // Get the appropriate details based on type
    let subtitle = '';
    let details = '';
-
+
    switch (type) {
      case 'track':
      {
@@ -574,7 +574,7 @@ document.addEventListener('DOMContentLoaded', function() {
      }
      break;
    }
-
+
    // Build the HTML
    cardElement.innerHTML = `
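As a side note, the URL parsing in getSpotifyResourceDetails above ports directly to other languages; the lazy optional group is what lets a locale segment such as intl-fr sit between the host and the resource type. A minimal Python rendition (illustrative only; the URLs in the comment are made-up examples):

    import re

    # Same pattern as the TypeScript regex above, case-insensitive.
    SPOTIFY_RESOURCE_RE = re.compile(
        r"spotify\.com/(?:[^/]+/)??(track|album|playlist|artist)/([a-zA-Z0-9]+)", re.I
    )

    def spotify_resource_details(url: str):
        match = SPOTIFY_RESOURCE_RE.search(url)
        return {"type": match.group(1), "id": match.group(2)} if match else None

    # Both https://open.spotify.com/track/abc123 and
    # https://open.spotify.com/intl-fr/track/abc123 yield {'type': 'track', 'id': 'abc123'}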
@@ -584,25 +584,25 @@ document.addEventListener('DOMContentLoaded', function() {
${subtitle}
${details}
`; - + // Add click event to navigate to the item's detail page cardElement.addEventListener('click', (e: MouseEvent) => { // Don't trigger if the download button was clicked const target = e.target as HTMLElement; - if (target.classList.contains('download-btn') || + if (target.classList.contains('download-btn') || target.parentElement?.classList.contains('download-btn')) { return; } - + if (item.id) { window.location.href = `/${type}/${item.id}`; } }); - + return cardElement; } diff --git a/src/js/playlist.ts b/src/js/playlist.ts index 7c20762..a24a207 100644 --- a/src/js/playlist.ts +++ b/src/js/playlist.ts @@ -266,7 +266,7 @@ function renderPlaylist(playlist: Playlist, isGlobalWatchEnabled: boolean) { downloadPlaylistBtn.classList.add('download-btn--disabled'); downloadPlaylistBtn.innerHTML = `Playlist Contains Explicit Tracks`; } - + if (downloadAlbumsBtn) { downloadAlbumsBtn.disabled = true; downloadAlbumsBtn.classList.add('download-btn--disabled'); @@ -322,23 +322,23 @@ function renderPlaylist(playlist: Playlist, isGlobalWatchEnabled: boolean) { // Render tracks list const tracksList = document.getElementById('tracks-list'); if (!tracksList) return; - + tracksList.innerHTML = ''; // Clear any existing content // Determine if the playlist is being watched to show/hide management buttons const watchPlaylistButton = document.getElementById('watchPlaylistBtn') as HTMLButtonElement; // isIndividuallyWatched checks if the button is visible and has the 'watching' class. // This implies global watch is enabled if the button is even interactable for individual status. - const isIndividuallyWatched = watchPlaylistButton && - watchPlaylistButton.classList.contains('watching') && + const isIndividuallyWatched = watchPlaylistButton && + watchPlaylistButton.classList.contains('watching') && !watchPlaylistButton.classList.contains('hidden'); if (playlist.tracks?.items) { playlist.tracks.items.forEach((item: PlaylistItem, index: number) => { if (!item || !item.track) return; // Skip null/undefined tracks - + const track = item.track; - + // Skip explicit tracks if filter is enabled if (isExplicitFilterEnabled && track.explicit) { // Add a placeholder for filtered explicit tracks @@ -356,7 +356,7 @@ function renderPlaylist(playlist: Playlist, isGlobalWatchEnabled: boolean) { tracksList.appendChild(trackElement); return; } - + const trackLink = `/track/${track.id || ''}`; const artistLink = `/artist/${track.artists?.[0]?.id || ''}`; const albumLink = `/album/${track.album?.id || ''}`; @@ -378,13 +378,13 @@ function renderPlaylist(playlist: Playlist, isGlobalWatchEnabled: boolean) {
${msToTime(track.duration_ms || 0)}
`; - + const actionsContainer = document.createElement('div'); actionsContainer.className = 'track-actions-container'; if (!(isExplicitFilterEnabled && hasExplicitTrack)) { const downloadBtnHTML = ` - `; actionsContainer.innerHTML += toggleKnownBtnHTML; } - + trackElement.innerHTML = trackHTML; trackElement.appendChild(actionsContainer); tracksList.appendChild(trackElement); @@ -435,7 +435,7 @@ function renderPlaylist(playlist: Playlist, isGlobalWatchEnabled: boolean) { */ function msToTime(duration: number) { if (!duration || isNaN(duration)) return '0:00'; - + const minutes = Math.floor(duration / 60000); const seconds = ((duration % 60000) / 1000).toFixed(0); return `${minutes}:${seconds.padStart(2, '0')}`; @@ -506,7 +506,7 @@ function attachTrackActionListeners(isGlobalWatchEnabled: boolean) { } } catch (error) { // Revert UI on error if needed, error is shown by handlers - showError('Failed to update track status. Please try again.'); + showError('Failed to update track status. Please try again.'); } button.disabled = false; }); @@ -562,15 +562,15 @@ async function downloadWholePlaylist(playlist: Playlist) { if (!playlist) { throw new Error('Invalid playlist data'); } - + const playlistId = playlist.id || ''; if (!playlistId) { throw new Error('Missing playlist ID'); } - + try { // Use the centralized downloadQueue.download method - await downloadQueue.download(playlistId, 'playlist', { + await downloadQueue.download(playlistId, 'playlist', { name: playlist.name || 'Unknown Playlist', owner: playlist.owner?.display_name // Pass owner as a string // total_tracks can also be passed if QueueItem supports it directly @@ -593,12 +593,12 @@ async function downloadPlaylistAlbums(playlist: Playlist) { showError('No tracks found in this playlist.'); return; } - + // Build a map of unique albums (using album ID as the key). 
  const albumMap = new Map();
  playlist.tracks.items.forEach((item: PlaylistItem) => {
    if (!item?.track?.album) return;
-
+
    const album = item.track.album;
    if (album && album.id) {
      albumMap.set(album.id, album);
@@ -624,18 +624,18 @@ async function downloadPlaylistAlbums(playlist: Playlist) {
    for (let i = 0; i < totalAlbums; i++) {
      const album = uniqueAlbums[i];
      if (!album) continue;
-
+
      const albumUrl = album.external_urls?.spotify || '';
      if (!albumUrl) continue;
-
+
      // Use the centralized downloadQueue.download method
      await downloadQueue.download(
        album.id, // Pass album ID directly
        'album',
-        { 
+        {
          name: album.name || 'Unknown Album',
          // If artist information is available on album objects from playlist, pass it
-          // artist: album.artists?.[0]?.name 
+          // artist: album.artists?.[0]?.name
        }
      );
@@ -652,7 +652,7 @@ async function downloadPlaylistAlbums(playlist: Playlist) {
    if (downloadAlbumsBtn) {
      downloadAlbumsBtn.textContent = 'Queued!';
    }
-
+
    // Make the queue visible after queueing all albums
    downloadQueue.toggleVisibility(true);
  } catch (error: any) {
@@ -669,11 +669,11 @@ async function startDownload(itemId: string, type: string, item: DownloadQueueIt
    showError('Missing ID or type for download');
    return;
  }
-
+
  try {
    // Use the centralized downloadQueue.download method
    await downloadQueue.download(itemId, type, item, albumType);
-
+
    // Make the queue visible after queueing
    downloadQueue.toggleVisibility(true);
  } catch (error: any) {
@@ -706,7 +706,7 @@ async function fetchWatchStatus(playlistId: string) {
    console.error('Error fetching watch status:', error);
    // Don't show a blocking error, but maybe a small notification or log
    // For now, assume not watched if status fetch fails, or keep buttons in default state
-    updateWatchButtons(false, playlistId); 
+    updateWatchButtons(false, playlistId);
  }
}
diff --git a/src/js/queue.ts b/src/js/queue.ts
index 3f0f594..eeab46f 100644
--- a/src/js/queue.ts
+++ b/src/js/queue.ts
@@ -144,31 +144,31 @@ export class DownloadQueue {
  // Cache for queue items
  queueCache: Record = {};
-
+
  // Queue entry objects
  queueEntries: Record = {};
-
+
  // Polling intervals for progress tracking
  pollingIntervals: Record = {}; // NodeJS.Timeout for setInterval
-
+
  // DOM elements cache (Consider if this is still needed or how it's used)
  elements: Record = {}; // Example type, adjust as needed
-
+
  // Event handlers (Consider if this is still needed or how it's used)
  eventHandlers: Record = {}; // Example type, adjust as needed
-
+
  // Configuration
  config: AppConfig = {}; // Initialize with an empty object or a default config structure
-
+
  // Load the saved visible count (or default to 10)
  visibleCount: number;
-
+
  constructor() {
    const storedVisibleCount = localStorage.getItem("downloadQueueVisibleCount");
    this.visibleCount = storedVisibleCount ? parseInt(storedVisibleCount, 10) : 10;
-
+
    this.queueCache = JSON.parse(localStorage.getItem("downloadQueueCache") || "{}");
-
+
    // Constants read from the server config
    this.MAX_RETRIES = 3; // Default max retries
    this.RETRY_DELAY = 5; // Default retry delay in seconds
@@ -176,29 +176,29 @@ export class DownloadQueue {
    // Cache for queue items
    // this.queueCache = {}; // Already initialized above
-
+
    // Queue entry objects
    this.queueEntries = {};
-
+
    // Polling intervals for progress tracking
    this.pollingIntervals = {};
-
+
    // DOM elements cache
    this.elements = {};
-
+
    // Event handlers
    this.eventHandlers = {};
-
+
    // Configuration
    this.config = {}; // Initialize config
-
+
    // Load the saved visible count (or default to 10) - This block is redundant
    // const storedVisibleCount = localStorage.getItem("downloadQueueVisibleCount");
    // this.visibleCount = storedVisibleCount ? parseInt(storedVisibleCount, 10) : 10;
-
+
    // Load the cached status info (object keyed by prgFile) - This is also redundant
    // this.queueCache = JSON.parse(localStorage.getItem("downloadQueueCache") || "{}");
-
+
    // Wait for initDOM to complete before setting up event listeners and loading existing PRG files.
    this.initDOM().then(() => {
      this.initEventListeners();
@@ -246,7 +246,7 @@ export class DownloadQueue {
      queueSidebar.hidden = !this.config.downloadQueueVisible;
      queueSidebar.classList.toggle('active', !!this.config.downloadQueueVisible);
    }
-
+
    // Initialize the queue icon based on sidebar visibility
    const queueIcon = document.getElementById('queueIcon');
    if (queueIcon && this.config) {
@@ -287,7 +287,7 @@ export class DownloadQueue {
        if (logElement) {
          logElement.textContent = "Cancelling...";
        }
-
+
        // Cancel each active download
        fetch(`/api/${entry.type}/download/cancel?prg_file=${entry.prgFile}`)
          .then(response => response.json())
@@ -309,7 +309,7 @@ export class DownloadQueue {
        this.clearAllPollingIntervals();
      });
    }
-
+
    // Close all SSE connections when the page is about to unload
    window.addEventListener('beforeunload', () => {
      this.clearAllPollingIntervals();
@@ -322,7 +322,7 @@ export class DownloadQueue {
    if (!queueSidebar) return; // Guard against null
    // If force is provided, use that value, otherwise toggle the current state
    const isVisible = force !== undefined ? force : !queueSidebar.classList.contains('active');
-
+
    queueSidebar.classList.toggle('active', isVisible);
    queueSidebar.hidden = !isVisible;
@@ -394,12 +394,12 @@ export class DownloadQueue {
    this.queueEntries[queueId] = entry;
    // Re-render and update which entries are processed.
    this.updateQueueOrder();
-
+
    // Start monitoring if explicitly requested, regardless of visibility
    if (startMonitoring) {
      this.startDownloadStatusMonitoring(queueId);
    }
-
+
    this.dispatchEvent('downloadAdded', { queueId, item, type });
    return queueId; // Return the queueId so callers can reference it
  }
@@ -408,10 +408,10 @@
  async startDownloadStatusMonitoring(queueId: string) {
    const entry = this.queueEntries[queueId];
    if (!entry || entry.hasEnded) return;
-
+
    // Don't restart monitoring if polling interval already exists
    if (this.pollingIntervals[queueId]) return;
-
+
    // Ensure entry has data containers for parent info
    entry.parentInfo = entry.parentInfo || null;
@@ -422,19 +422,19 @@
        logElement.textContent = "Initializing download...";
      }
    }
-
+
    console.log(`Starting monitoring for ${entry.type} with PRG file: ${entry.prgFile}`);
-
+
    // For backward compatibility, first try to get initial status from the REST API
    try {
      const response = await fetch(`/api/prgs/${entry.prgFile}`);
      if (response.ok) {
        const data: StatusData = await response.json(); // Add type to data
-
+
        // Update entry type if available
        if (data.type) {
          entry.type = data.type;
-
+
          // Update type display if element exists
          const typeElement = entry.element.querySelector('.type') as HTMLElement | null;
          if (typeElement) {
@@ -442,7 +442,7 @@
            typeElement.className = `type ${data.type}`;
          }
        }
-
+
        // Update request URL if available
        if (!entry.requestUrl && data.original_request) {
          const params = new CustomURLSearchParams();
@@ -451,46 +451,46 @@
          }
          entry.requestUrl = `/api/${entry.type}/download?${params.toString()}`;
        }
-
+
        // Override requestUrl with server original_url if provided
        if (data.original_url) {
          entry.requestUrl = data.original_url;
        }
-
+
        // Process the initial status
        if (data.last_line) {
          entry.lastStatus = data.last_line;
          entry.lastUpdated = Date.now();
          entry.status = data.last_line.status || 'unknown'; // Ensure status is not undefined
-
+
          // Update status message without recreating the element
          const logElement = document.getElementById(`log-${entry.uniqueId}-${entry.prgFile}`) as HTMLElement | null;
          if (logElement) {
            const statusMessage = this.getStatusMessage(data.last_line);
            logElement.textContent = statusMessage;
          }
-
+
          // Apply appropriate CSS classes based on status
          this.applyStatusClasses(entry, data.last_line);
-
+
          // Save updated status to cache, ensuring we preserve parent data
          this.queueCache[entry.prgFile] = {
            ...data.last_line,
            // Ensure parent data is preserved
            parent: data.last_line.parent || entry.lastStatus?.parent
          };
-
+
          // If this is a track with a parent, update the display elements to match the parent
          if (data.last_line.type === 'track' && data.last_line.parent) {
            const parent = data.last_line.parent;
            entry.parentInfo = parent;
-
+
            // Update type and UI to reflect the parent type
            if (parent.type === 'album' || parent.type === 'playlist') {
              // Only change type if it's not already set to the parent type
              if (entry.type !== parent.type) {
                entry.type = parent.type;
-
+
                // Update the type indicator
                const typeEl = entry.element.querySelector('.type') as HTMLElement | null;
                if (typeEl) {
@@ -498,11 +498,11 @@
                  typeEl.textContent = displayType;
                  typeEl.className = `type ${parent.type}`;
                }
-
+
                // Update the title and subtitle based on parent type
                const titleEl = entry.element.querySelector('.title') as HTMLElement | null;
                const artistEl = entry.element.querySelector('.artist') as HTMLElement | null;
-
+
                if (parent.type === 'album') {
                  if (titleEl) titleEl.textContent = parent.title || 'Unknown album';
                  if (artistEl) artistEl.textContent = parent.artist || 'Unknown artist';
@@ -513,9 +513,9 @@
            }
          }
        }
-
+
        localStorage.setItem("downloadQueueCache", JSON.stringify(this.queueCache));
-
+
        // If the entry is already in a terminal state, don't set up polling
        if (['error', 'complete', 'cancel', 'cancelled', 'done'].includes(data.last_line.status || '')) { // Add null check for status
          entry.hasEnded = true;
@@ -527,7 +527,7 @@
    } catch (error) {
      console.error('Initial status check failed:', error);
    }
-
+
    // Set up polling interval for real-time updates
    this.setupPollingInterval(queueId);
  }
@@ -542,10 +542,10 @@
  createQueueEntry(item: QueueItem, type: string, prgFile: string, queueId: string, requestUrl: string | null): QueueEntry {
    console.log(`Creating queue entry with initial type: ${type}`);
-
+
    // Get cached data if it exists
    const cachedData: StatusData | undefined = this.queueCache[prgFile]; // Add type
-
+
    // If we have cached data, use it to determine the true type and item properties
    if (cachedData) {
      // If this is a track with a parent, update type and item to match the parent
@@ -583,11 +583,11 @@
        };
      }
    }
-
+
    // Build the basic entry with possibly updated type and item
    const entry: QueueEntry = { // Add type to entry
      item,
-      type, 
+      type,
      prgFile,
      requestUrl, // for potential retry
      element: this.createQueueItem(item, type, prgFile, queueId),
@@ -614,26 +614,26 @@
      parentInfo: null, // Will store parent data for tracks that are part of albums/playlists
      realTimeStallDetector: { count: 0, lastStatusJson: '' } // For detecting stalled real_time downloads
    };
-
+
    // If cached info exists for this PRG file, use it.
    if (cachedData) {
      entry.lastStatus = cachedData;
      const logEl = entry.element.querySelector('.log') as HTMLElement | null;
-
+
      // Store parent information if available
      if (cachedData.parent) {
        entry.parentInfo = cachedData.parent;
      }
-
+
      // Render status message for cached data
      if (logEl) { // Check if logEl is not null
        logEl.textContent = this.getStatusMessage(entry.lastStatus);
      }
    }
-
+
    // Store it in our queue object
    this.queueEntries[queueId] = entry;
-
+
    return entry;
  }
@@ -644,18 +644,18 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
  // Track whether this is a multi-track item (album or playlist)
  const isMultiTrack = type === 'album' || type === 'playlist';
  const defaultMessage = (type === 'playlist') ? 'Reading track list' : 'Initializing download...';
-
+
  // Use display values if available, or fall back to standard fields
  const displayTitle = item.name || item.music || item.song || 'Unknown';
  const displayArtist = item.artist || '';
  const displayType = type.charAt(0).toUpperCase() + type.slice(1);
-
+
  const div = document.createElement('article') as HTMLElement; // Cast to HTMLElement
  div.className = 'queue-item queue-item-new'; // Add the animation class
  div.setAttribute('aria-live', 'polite');
  div.setAttribute('aria-atomic', 'true');
  div.setAttribute('data-type', type);
-
+
  // Create modern HTML structure with better visual hierarchy
  let innerHtml = `
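The cached-parent handling above, where a track whose status carries a parent album or playlist is re-labelled and re-titled as that parent, boils down to one decision rule. A rough Python sketch, under the assumption that the status dict is shaped like the cached StatusData used here (display_fields is a hypothetical helper, not part of the codebase):

    # Returns (display_type, title, subtitle) for a queue entry.
    def display_fields(cached, fallback_type, item):
        parent = (cached or {}).get("parent")
        if (cached or {}).get("type") == "track" and parent:
            if parent.get("type") == "album":
                return ("album", parent.get("title") or "Unknown album",
                        parent.get("artist") or "Unknown artist")
            if parent.get("type") == "playlist":
                return ("playlist", parent.get("name") or "Unknown playlist",
                        parent.get("owner") or "")
        return (fallback_type, item.get("name") or "Unknown", item.get("artist") or "")

Applied to a track that arrives as part of an album, this yields the album title and album artist, which matches the UI updates performed in startDownloadStatusMonitoring and createQueueEntry.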
@@ -668,25 +668,25 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) Cancel Download
- +
${defaultMessage}
- + - +
-
- +
`; - + // For albums and playlists, add an overall progress container if (isMultiTrack) { innerHtml += ` @@ -696,21 +696,21 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) 0/0
-
    `;
  }
-
+
  div.innerHTML = innerHtml;
-
+
  (div.querySelector('.cancel-btn') as HTMLButtonElement | null)?.addEventListener('click', (e: MouseEvent) => this.handleCancelDownload(e)); // Add types and optional chaining
-
+
  // Remove the animation class after animation completes
  setTimeout(() => {
    div.classList.remove('queue-item-new');
  }, 300); // Match the animation duration
-
+
  return div;
}
@@ -718,13 +718,13 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
  applyStatusClasses(entry: QueueEntry, statusData: StatusData) { // Add types for statusData
    // If no element, nothing to do
    if (!entry.element) return;
-
+
    // Remove all status classes first
    entry.element.classList.remove(
-      'queued', 'initializing', 'downloading', 'processing', 
+      'queued', 'initializing', 'downloading', 'processing',
      'error', 'complete', 'cancelled', 'progress'
    );
-
+
    // Handle various status types
    switch (statusData.status) { // Use statusData.status
      case 'queued':
@@ -751,7 +751,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
        }
        break;
      case 'complete':
-      case 'done': 
+      case 'done':
        entry.element.classList.add('complete');
        // Hide error details if present
        if (entry.element) {
@@ -788,13 +788,13 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
      // Add a visual indication that it's being cancelled
      entry.element.classList.add('cancelling');
    }
-
+
    // Show cancellation in progress
    const logElement = document.getElementById(`log-${queueid}-${prg}`) as HTMLElement | null;
    if (logElement) {
      logElement.textContent = "Cancelling...";
    }
-
+
    // First cancel the download
    const response = await fetch(`/api/${type}/download/cancel?prg_file=${prg}`);
    const data = await response.json();
@@ -802,20 +802,20 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
    if (data.status === "cancelled" || data.status === "cancel") {
      if (entry) {
        entry.hasEnded = true;
-
+
        // Close any active connections
        this.clearPollingInterval(queueid);
-
+
        if (entry.intervalId) {
          clearInterval(entry.intervalId as number); // Cast to number
          entry.intervalId = null;
        }
-
+
        // Mark as cancelled in the cache to prevent re-loading on page refresh
        entry.status = "cancelled";
        this.queueCache[prg] = { status: "cancelled" };
        localStorage.setItem("downloadQueueCache", JSON.stringify(this.queueCache));
-
+
        // Immediately remove the item from the UI
        this.cleanupEntry(queueid);
      }
@@ -862,16 +862,16 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
    if (queueTotalCountEl) {
      queueTotalCountEl.textContent = entries.length.toString();
    }
-
+
    // Remove subtitle with detailed stats if it exists
    const subtitleEl = document.getElementById('queueSubtitle');
    if (subtitleEl) {
      subtitleEl.remove();
    }
-
+
    // Only recreate the container content if really needed
    const visibleEntries = entries.slice(0, this.visibleCount);
-
+
    // Handle empty state
    if (entries.length === 0) {
      container.innerHTML = `
@@ -883,7 +883,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
    } else {
      // Get currently visible items
      const visibleItems = Array.from(container.children).filter(el => el.classList.contains('queue-item'));
-
+
      // Update container more efficiently
      if (visibleItems.length === 0) {
        // No items in container, append all visible entries
@@ -895,31 +895,31 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
        });
      } else {
        // Container already has items, update more efficiently
-
+
        // Create a map of current DOM elements by queue ID
        const existingElementMap: { [key: string]: HTMLElement } = {};
        visibleItems.forEach(el => {
          const queueId = (el.querySelector('.cancel-btn') as HTMLElement | null)?.dataset.queueid; // Optional chaining
          if (queueId) existingElementMap[queueId] = el as HTMLElement; // Cast to HTMLElement
        });
-
+
        // Clear container to re-add in correct order
        container.innerHTML = '';
-
+
        // Add visible entries in correct order
        visibleEntries.forEach((entry: QueueEntry) => {
          // We no longer automatically start monitoring here
          container.appendChild(entry.element);
-
+
          // Mark the entry as not new anymore
          entry.isNew = false;
        });
      }
    }
-
+
    // We no longer start or stop monitoring based on visibility changes here
    // This allows the explicit monitoring control from the download methods
-
+
    // Ensure all currently visible and active entries are being polled
    // This is important for items that become visible after "Show More" or other UI changes
    Object.values(this.queueEntries).forEach(entry => {
@@ -928,7 +928,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
        this.setupPollingInterval(entry.uniqueId);
      }
    });
-
+
    // Update footer
    footer.innerHTML = '';
    if (entries.length > this.visibleCount) {
@@ -979,7 +979,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
    if (entry) {
      // Close any polling interval
      this.clearPollingInterval(queueId);
-
+
      // Clean up any intervals
      if (entry.intervalId) {
        clearInterval(entry.intervalId as number); // Cast to number
@@ -987,19 +987,19 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
      if (entry.autoRetryInterval) {
        clearInterval(entry.autoRetryInterval as number); // Cast to number
      }
-
+
      // Remove from the DOM
      entry.element.remove();
-
+
      // Delete from in-memory queue
      delete this.queueEntries[queueId];
-
+
      // Remove the cached info
      if (this.queueCache[entry.prgFile]) {
        delete this.queueCache[entry.prgFile];
        localStorage.setItem("downloadQueueCache", JSON.stringify(this.queueCache));
      }
-
+
      // Update the queue display
      this.updateQueueOrder();
    }
@@ -1016,43 +1016,43 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string)
    // show it as part of the parent's download process
    let displayType = data.type || 'unknown';
    let isChildTrack = false;
-
+
    // If this is a track that's part of an album/playlist, note that
    if (data.type === 'track' && data.parent) {
      isChildTrack = true;
      // We'll still use track-specific info but note it's part of a parent
    }
-
+
    // Find the queue item this status belongs to
    let queueItem: QueueEntry | null = null;
-    const prgFile = data.prg_file || Object.keys(this.queueCache).find(key => 
+    const prgFile = data.prg_file || Object.keys(this.queueCache).find(key =>
      this.queueCache[key].status === data.status && this.queueCache[key].type === data.type
    );
-
+
    if (prgFile) {
-      const queueId = Object.keys(this.queueEntries).find(id => 
+      const queueId = Object.keys(this.queueEntries).find(id =>
        this.queueEntries[id].prgFile === prgFile
      );
      if (queueId) {
        queueItem = this.queueEntries[queueId];
      }
    }
-
+
    // Extract common fields
-    const trackName = data.song || data.music || data.name || data.title + 
+    const trackName = data.song || data.music || data.name || data.title || 
      (queueItem?.item?.name) || 'Unknown';
-    const artist = data.artist || data.artist_name + 
+    const artist = data.artist || data.artist_name || 
      (queueItem?.item?.name) || '';
-    const albumTitle = data.title || data.album || data.parent?.title || data.name ||
data.album || data.parent?.title || data.name || + const albumTitle = data.title || data.album || data.parent?.title || data.name || (queueItem?.item?.name) || ''; - const playlistName = data.name || data.parent?.name || + const playlistName = data.name || data.parent?.name || (queueItem?.item?.name) || ''; - const playlistOwner = data.owner || data.parent?.owner || + const playlistOwner = data.owner || data.parent?.owner || (queueItem?.item?.owner) || ''; // Add type check if item.owner is object const currentTrack = data.current_track || data.parsed_current_track || ''; - const totalTracks = data.total_tracks || data.parsed_total_tracks || data.parent?.total_tracks || + const totalTracks = data.total_tracks || data.parsed_total_tracks || data.parent?.total_tracks || (queueItem?.item?.total_tracks) || ''; - + // Format percentage for display when available let formattedPercentage = '0'; if (data.progress !== undefined) { @@ -1062,11 +1062,11 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } else if (data.percent) { formattedPercentage = (parseFloat(data.percent as string) * 100).toFixed(1); // Cast to string } - + // Helper for constructing info about the parent item const getParentInfo = (): string => { // Add return type if (!data.parent) return ''; - + if (data.parent.type === 'album') { return ` from album "${data.parent.title}"`; } else if (data.parent.type === 'playlist') { @@ -1074,7 +1074,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } return ''; }; - + // Status-based message generation switch (data.status) { case 'queued': @@ -1086,10 +1086,10 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) return `Queued playlist "${playlistName}"${playlistOwner ? ` by ${playlistOwner}` : ''} (${totalTracks || '?'} tracks)`; } return `Queued ${data.type}`; - + case 'initializing': return `Preparing to download...`; - + case 'processing': // Special case: If this is a track that's part of an album/playlist if (data.type === 'track' && data.parent) { @@ -1099,11 +1099,11 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) return `Processing track ${currentTrack}/${totalTracks}: "${trackName}" by ${artist} (from playlist "${data.parent.name}")`; } } - + // Regular standalone track if (data.type === 'track') { return `Processing track "${trackName}"${artist ? 
` by ${artist}` : ''}${getParentInfo()}`; - } + } // Album download else if (data.type === 'album') { // For albums, show current track info if available @@ -1116,7 +1116,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) return `Processing album "${albumTitle}" (${totalTracks} tracks)`; } return `Processing album "${albumTitle}"...`; - } + } // Playlist download else if (data.type === 'playlist') { // For playlists, show current track info if available @@ -1131,7 +1131,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) return `Processing playlist "${playlistName}"...`; } return `Processing ${data.type}...`; - + case 'progress': // Special case: If this is a track that's part of an album/playlist if (data.type === 'track' && data.parent) { @@ -1141,11 +1141,11 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) return `Downloading track ${currentTrack}/${totalTracks}: "${trackName}" by ${artist} (from playlist "${data.parent.name}")`; } } - + // Regular standalone track if (data.type === 'track') { return `Downloading track "${trackName}"${artist ? ` by ${artist}` : ''}${getParentInfo()}`; - } + } // Album download else if (data.type === 'album') { // For albums, show current track info if available @@ -1158,7 +1158,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) return `Downloading album "${albumTitle}" (${totalTracks} tracks)`; } return `Downloading album "${albumTitle}"...`; - } + } // Playlist download else if (data.type === 'playlist') { // For playlists, show current track info if available @@ -1173,7 +1173,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) return `Downloading playlist "${playlistName}"...`; } return `Downloading ${data.type}...`; - + case 'real-time': case 'real_time': // Special case: If this is a track that's part of an album/playlist @@ -1184,26 +1184,26 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) return `Downloading track ${currentTrack}/${totalTracks}: "${trackName}" by ${artist} - ${formattedPercentage}% (from playlist "${data.parent.name}")`; } } - + // Regular standalone track if (data.type === 'track') { return `Downloading "${trackName}" - ${formattedPercentage}%${getParentInfo()}`; - } + } // Album with track info else if (data.type === 'album' && trackName && artist) { return `Downloading ${currentTrack}/${totalTracks}: "${trackName}" by ${artist} - ${formattedPercentage}%`; - } + } // Playlist with track info else if (data.type === 'playlist' && trackName && artist) { return `Downloading ${currentTrack}/${totalTracks}: "${trackName}" by ${artist} - ${formattedPercentage}%`; - } + } // Generic with percentage else { - const itemName = data.type === 'album' ? albumTitle : + const itemName = data.type === 'album' ? albumTitle : (data.type === 'playlist' ? playlistName : data.type); return `Downloading ${data.type} "${itemName}" - ${formattedPercentage}%`; } - + case 'done': case 'complete': if (data.type === 'track') { @@ -1214,19 +1214,19 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) return `Downloaded playlist "${playlistName}"${playlistOwner ? ` by ${playlistOwner}` : ''} successfully (${totalTracks} tracks)`; } return `Downloaded ${data.type} successfully`; - + case 'skipped': return `${trackName}${artist ? 
` by ${artist}` : ''} was skipped: ${data.reason || 'Unknown reason'}`; - + case 'error': // Enhanced error message handling using the new format let errorMsg = `Error: ${data.error}`; - + // Add position information for tracks in collections if (data.current_track && data.total_tracks) { errorMsg = `Error on track ${data.current_track}/${data.total_tracks}: ${data.error}`; } - + // Add retry information if available if (data.retry_count !== undefined) { errorMsg += ` (Attempt ${data.retry_count}/${this.MAX_RETRIES})`; @@ -1237,7 +1237,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) errorMsg += ` (Max retries reached)`; } } - + // Add parent information if this is a track with a parent if (data.type === 'track' && data.parent) { if (data.parent.type === 'album') { @@ -1246,14 +1246,14 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) errorMsg += `\nFrom playlist: "${data.parent.name}" by ${data.parent.owner || 'Unknown creator'}`; } } - + // Add URL for troubleshooting if available if (data.url) { errorMsg += `\nSource: ${data.url}`; } - + return errorMsg; - + case 'retrying': let retryMsg = 'Retrying'; if (data.retry_count) { @@ -1266,11 +1266,11 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) retryMsg += `: ${data.error}`; } return retryMsg; - + case 'cancelled': case 'cancel': return 'Cancelling...'; - + default: return data.status || 'Unknown status'; } @@ -1280,7 +1280,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) handleDownloadCompletion(entry: QueueEntry, queueId: string, progress: StatusData | number) { // Add types // Mark the entry as ended entry.hasEnded = true; - + // Update progress bar if available if (typeof progress === 'number') { const progressBar = entry.element.querySelector('.progress-bar') as HTMLElement | null; @@ -1290,15 +1290,15 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) progressBar.classList.add('bg-success'); } } - + // Stop polling this.clearPollingInterval(queueId); - + // Use 3 seconds cleanup delay for completed, 10 seconds for other terminal states like errors const cleanupDelay = (progress && typeof progress !== 'number' && (progress.status === 'complete' || progress.status === 'done')) ? 3000 : (progress && typeof progress !== 'number' && (progress.status === 'cancelled' || progress.status === 'cancel' || progress.status === 'skipped')) ? 20000 : 10000; // Default for other errors if not caught by the more specific error handler delay - + // Clean up after the appropriate delay setTimeout(() => { this.cleanupEntry(queueId); @@ -1336,7 +1336,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) const retryBtn = logElement?.querySelector('.retry-btn') as HTMLButtonElement | null; entry.isRetrying = true; // Mark the original entry as being retried. 
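// The retry path below re-resolves a source URL before re-queueing the item. A
// minimal sketch of that fallback chain, assuming the QueueEntry fields this hunk
// reads (lastStatus.original_url, the parent URL for child tracks, lastStatus.url);
// the helper name is illustrative, and the hunk gap elides some intermediate checks:
//
//   const resolveRetryUrl = (entry: QueueEntry, parentUrl: string | null): string | null =>
//     entry.lastStatus?.original_url    // URL captured when the download was first queued
//     ?? parentUrl                      // album/playlist parent URL, when retrying a child track
//     ?? entry.lastStatus?.url          // URL echoed back by the status endpoint
//     ?? null;                          // nothing usable: "Retry not available" is shown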
- + // Determine if we should use parent information for retry (existing logic) let useParent = false; let parentType: string | null = null; // Add type @@ -1350,7 +1350,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) console.log(`Using parent info for retry: ${parentType} with URL: ${parentUrl}`); } } - + const getRetryUrl = (): string | null => { // Add return type if (entry.lastStatus && entry.lastStatus.original_url) return entry.lastStatus.original_url; if (useParent && parentUrl) return parentUrl; @@ -1362,9 +1362,9 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) if (entry.lastStatus && entry.lastStatus.url) return entry.lastStatus.url; return null; }; - + const retryUrl = getRetryUrl(); - + if (!retryUrl) { if (errorMessageDiv) errorMessageDiv.textContent = 'Retry not available: missing URL information.'; entry.isRetrying = false; @@ -1374,12 +1374,12 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } return; } - + // Store details needed for the new entry BEFORE any async operations const originalItem: QueueItem = { ...entry.item }; // Shallow copy, add type const apiTypeForNewEntry = useParent && parentType ? parentType : entry.type; // Ensure parentType is not null console.log(`Retrying download using type: ${apiTypeForNewEntry} with base URL: ${retryUrl}`); - + let fullRetryUrl; if (retryUrl.startsWith('http') || retryUrl.startsWith('/api/')) { // if it's already a full URL or an API path fullRetryUrl = retryUrl; @@ -1405,19 +1405,19 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) const errorText = await retryResponse.text(); throw new Error(`Server returned ${retryResponse.status}${errorText ? (': ' + errorText) : ''}`); } - + const retryData: StatusData = await retryResponse.json(); // Add type - + if (retryData.prg_file) { const newPrgFile = retryData.prg_file; - + // Clean up the old entry from UI, memory, cache, and server (PRG file) // logElement and retryBtn are part of the old entry's DOM structure and will be removed. await this.cleanupEntry(queueId); - + // Add the new download entry. This will create a new element, start monitoring, etc. this.addDownload(originalItem, apiTypeForNewEntry, newPrgFile, requestUrlForNewEntry, true); - + // The old setTimeout block for deleting oldPrgFile is no longer needed as cleanupEntry handles it. 
} else { if (errorMessageDiv) errorMessageDiv.textContent = 'Retry failed: invalid response from server.'; @@ -1446,7 +1446,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) retryButtonOnFailedEntry.disabled = false; retryButtonOnFailedEntry.innerHTML = 'Retry'; } - } else if (errorMessageDiv) { + } else if (errorMessageDiv) { // Fallback if entry is gone from queue but original logElement's parts are somehow still accessible errorMessageDiv.textContent = 'Retry failed: ' + (error as Error).message; if (retryBtn) { @@ -1479,18 +1479,18 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) if (!itemId) { throw new Error('Missing ID for download'); } - + await this.loadConfig(); // Construct the API URL in the new format /api/{type}/download/{itemId} let apiUrl = `/api/${type}/download/${itemId}`; - + // Prepare query parameters const queryParams = new URLSearchParams(); // item.name and item.artist are no longer sent as query parameters // if (item.name && item.name.trim() !== '') queryParams.append('name', item.name); // if (item.artist && item.artist.trim() !== '') queryParams.append('artist', item.artist); - + // For artist downloads, include album_type as it may still be needed if (type === 'artist' && albumType) { queryParams.append('album_type', albumType); @@ -1500,7 +1500,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) if (queryString) { apiUrl += `?${queryString}`; } - + console.log(`Constructed API URL for download: ${apiUrl}`); // Log the constructed URL try { @@ -1509,30 +1509,30 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) if (queueIcon) { queueIcon.classList.add('queue-icon-active'); } - + const response = await fetch(apiUrl); if (!response.ok) { throw new Error(`Server returned ${response.status}`); } - + const data: StatusData | { task_ids?: string[], album_prg_files?: string[] } = await response.json(); // Add type for data - + // Handle artist downloads which return multiple album tasks if (type === 'artist') { // Check for new API response format if ('task_ids' in data && data.task_ids && Array.isArray(data.task_ids)) { // Type guard console.log(`Queued artist discography with ${data.task_ids.length} albums`); - + // Make queue visible to show progress this.toggleVisibility(true); - + // Create entries directly from task IDs and start monitoring them const queueIds: string[] = []; // Add type for (const taskId of data.task_ids) { console.log(`Adding album task with ID: ${taskId}`); // Create an album item with better display information const albumItem: QueueItem = { // Add type - name: `${item.name || 'Artist'} - Album (loading...)`, + name: `${item.name || 'Artist'} - Album (loading...)`, artist: item.name || 'Unknown artist', type: 'album' }; @@ -1540,23 +1540,23 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) const queueId = this.addDownload(albumItem, 'album', taskId, apiUrl, true); queueIds.push(queueId); } - + return queueIds; - } + } // Check for older API response format else if ('album_prg_files' in data && data.album_prg_files && Array.isArray(data.album_prg_files)) { // Type guard console.log(`Queued artist discography with ${data.album_prg_files.length} albums (old format)`); - + // Make queue visible to show progress this.toggleVisibility(true); - + // Add each album to the download queue separately with forced monitoring const queueIds: string[] = []; // Add type 
data.album_prg_files.forEach(prgFile => { console.log(`Adding album with PRG file: ${prgFile}`); // Create an album item with better display information const albumItem: QueueItem = { // Add type - name: `${item.name || 'Artist'} - Album (loading...)`, + name: `${item.name || 'Artist'} - Album (loading...)`, artist: item.name || 'Unknown artist', type: 'album' }; @@ -1564,19 +1564,19 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) const queueId = this.addDownload(albumItem, 'album', prgFile, apiUrl, true); queueIds.push(queueId); }); - + return queueIds; } // Handle any other response format for artist downloads else { console.log(`Queued artist discography with unknown format:`, data); - + // Make queue visible this.toggleVisibility(true); - + // Just load existing PRG files as a fallback await this.loadExistingPrgFiles(); - + // Force start monitoring for all loaded entries for (const queueId in this.queueEntries) { const entry = this.queueEntries[queueId]; @@ -1584,15 +1584,15 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) this.startDownloadStatusMonitoring(queueId); } } - + return data; } } - + // Handle single-file downloads (tracks, albums, playlists) if ('prg_file' in data && data.prg_file) { // Type guard console.log(`Adding ${type} PRG file: ${data.prg_file}`); - + // Store the initial metadata in the cache so it's available // even before the first status update this.queueCache[data.prg_file] = { @@ -1604,15 +1604,15 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) owner: typeof item.owner === 'string' ? item.owner : item.owner?.display_name || '', total_tracks: item.total_tracks || 0 }; - + // Use direct monitoring for all downloads for consistency const queueId = this.addDownload(item, type, data.prg_file, apiUrl, true); - + // Make queue visible to show progress if not already visible if (this.config && !this.config.downloadQueueVisible) { // Add null check for config this.toggleVisibility(true); } - + return queueId; } else { throw new Error('Invalid response format from server'); @@ -1634,7 +1634,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) this.clearPollingInterval(queueId); delete this.queueEntries[queueId]; } - + // Fetch detailed task list from the new endpoint const response = await fetch('/api/prgs/list'); if (!response.ok) { @@ -1697,7 +1697,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) }; } } - + let retryCount = 0; if (lastStatus && lastStatus.retry_count) { retryCount = lastStatus.retry_count; @@ -1709,7 +1709,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } const requestUrl = originalRequest.url ? 
`/api/${itemType}/download/${originalRequest.url.split('/').pop()}?name=${encodeURIComponent(dummyItem.name || '')}&artist=${encodeURIComponent(dummyItem.artist || '')}` : null; - + const queueId = this.generateQueueId(); const entry = this.createQueueEntry(dummyItem, itemType, prgFile, queueId, requestUrl); entry.retryCount = retryCount; @@ -1729,7 +1729,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } this.queueEntries[queueId] = entry; } - + localStorage.setItem("downloadQueueCache", JSON.stringify(this.queueCache)); this.updateQueueOrder(); this.startMonitoringActiveEntries(); @@ -1743,7 +1743,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) const response = await fetch('/api/config'); if (!response.ok) throw new Error('Failed to fetch config'); this.config = await response.json(); - + // Update our retry constants from the server config if (this.config.maxRetries !== undefined) { this.MAX_RETRIES = this.config.maxRetries; @@ -1754,7 +1754,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) if (this.config.retry_delay_increase !== undefined) { this.RETRY_DELAY_INCREASE = this.config.retry_delay_increase; } - + console.log(`Loaded retry settings from config: max=${this.MAX_RETRIES}, delay=${this.RETRY_DELAY}, increase=${this.RETRY_DELAY_INCREASE}`); } catch (error) { console.error('Error loading config:', error); @@ -1796,19 +1796,19 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) console.warn(`No entry or prgFile for ${queueId}`); return; } - + // Close any existing connection this.clearPollingInterval(queueId); - + try { // Immediately fetch initial data this.fetchDownloadStatus(queueId); - + // Create a polling interval of 500ms for more responsive UI updates const intervalId = setInterval(() => { this.fetchDownloadStatus(queueId); }, 500); - + // Store the interval ID for later cleanup this.pollingIntervals[queueId] = intervalId as unknown as number; // Cast to number via unknown } catch (error) { @@ -1820,22 +1820,22 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } } } - + async fetchDownloadStatus(queueId: string) { // Add type const entry = this.queueEntries[queueId]; if (!entry || !entry.prgFile) { console.warn(`No entry or prgFile for ${queueId}`); return; } - + try { const response = await fetch(`/api/prgs/${entry.prgFile}`); if (!response.ok) { throw new Error(`HTTP error: ${response.status}`); } - + const data: StatusData = await response.json(); // Add type - + // If the last_line doesn't have name/artist/title info, add it from our stored item data if (data.last_line && entry.item) { if (!data.last_line.name && entry.item.name) { @@ -1856,12 +1856,12 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) data.last_line.total_tracks = entry.item.total_tracks; } } - + // Initialize the download type if needed if (data.type && !entry.type) { console.log(`Setting entry type to: ${data.type}`); entry.type = data.type; - + // Update type display if element exists const typeElement = entry.element.querySelector('.type') as HTMLElement | null; if (typeElement) { @@ -1870,7 +1870,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) typeElement.className = `type ${data.type}`; } } - + // Special handling for track updates that are part of an album/playlist // Don't filter these out as they contain important track progress info if 
(data.last_line && data.last_line.type === 'track' && data.last_line.parent) { @@ -1882,21 +1882,21 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } } // Only filter out updates that don't match entry type AND don't have a relevant parent - else if (data.last_line && data.last_line.type && entry.type && - data.last_line.type !== entry.type && + else if (data.last_line && data.last_line.type && entry.type && + data.last_line.type !== entry.type && (!data.last_line.parent || data.last_line.parent.type !== entry.type)) { console.log(`Skipping status update with type '${data.last_line.type}' for entry with type '${entry.type}'`); return; } - + // Process the update this.handleStatusUpdate(queueId, data); - + // Handle terminal states if (data.last_line && ['complete', 'error', 'cancelled', 'done'].includes(data.last_line.status || '')) { // Add null check console.log(`Terminal state detected: ${data.last_line.status} for ${queueId}`); entry.hasEnded = true; - + // For cancelled downloads, clean up immediately if (data.last_line.status === 'cancelled' || data.last_line.status === 'cancel') { console.log('Cleaning up cancelled download immediately'); @@ -1904,13 +1904,13 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) this.cleanupEntry(queueId); return; // No need to process further } - + // Only set up cleanup if this is not an error that we're in the process of retrying // If status is 'error' but the status message contains 'Retrying', don't clean up - const isRetrying = entry.isRetrying || - (data.last_line.status === 'error' && + const isRetrying = entry.isRetrying || + (data.last_line.status === 'error' && entry.element.querySelector('.log')?.textContent?.includes('Retry')); - + if (!isRetrying) { setTimeout(() => { // Double-check the entry still exists and has not been retried before cleaning up @@ -1924,10 +1924,10 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) }, data.last_line.status === 'complete' || data.last_line.status === 'done' ? 
3000 : 5000); // 3s for complete/done, 5s for others } } - + } catch (error) { console.error(`Error fetching status for ${queueId}:`, error); - + // Show error in log const logElement = document.getElementById(`log-${entry.uniqueId}-${entry.prgFile}`) as HTMLElement | null; if (logElement) { @@ -1935,7 +1935,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } } } - + clearPollingInterval(queueId: string) { // Add type if (this.pollingIntervals[queueId]) { console.log(`Stopping polling for ${queueId}`); @@ -1955,10 +1955,10 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) console.warn(`No entry for ${queueId}`); return; } - + // Extract the actual status data from the API response const statusData: StatusData = data.last_line || {}; // Add type - + // Special handling for track status updates that are part of an album/playlist // We want to keep these for showing the track-by-track progress if (statusData.type === 'track' && statusData.parent) { @@ -1969,12 +1969,12 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } } // Only skip updates where type doesn't match AND there's no relevant parent relationship - else if (statusData.type && entry.type && statusData.type !== entry.type && + else if (statusData.type && entry.type && statusData.type !== entry.type && (!statusData.parent || statusData.parent.type !== entry.type)) { console.log(`Skipping mismatched type: update=${statusData.type}, entry=${entry.type}`); return; } - + // Get primary status let status = statusData.status || data.event || 'unknown'; // Define status *before* potential modification @@ -1990,12 +1990,12 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) current_track: (entry.type === 'album' || entry.type === 'playlist') ? statusData.current_track : undefined, // Include other relevant fields if they signify activity, e.g., speed, eta // For example, if statusData.song changes for an album, that's progress. 
- song: statusData.song + song: statusData.song }; const currentMetricsJson = JSON.stringify(currentMetrics); // Check if significant metrics are present and static - if (detector.lastStatusJson === currentMetricsJson && + if (detector.lastStatusJson === currentMetricsJson && (currentMetrics.progress !== undefined || currentMetrics.time_elapsed !== undefined || currentMetrics.current_track !== undefined || currentMetrics.song !== undefined)) { // Metrics are present and haven't changed detector.count++; @@ -2021,11 +2021,11 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) detector.lastStatusJson = ''; } } - + // Store the status data for potential retries entry.lastStatus = statusData; // This now stores the potentially modified statusData (e.g., status changed to 'error') entry.lastUpdated = Date.now(); - + // Update type if needed - could be more specific now (e.g., from 'album' to 'compilation') if (statusData.type && statusData.type !== entry.type) { entry.type = statusData.type; @@ -2039,23 +2039,23 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) // Update the title and artist with better information if available this.updateItemMetadata(entry, statusData, data); - + // Generate appropriate user-friendly message const message = this.getStatusMessage(statusData); - + // Update log message - but only if we're not handling a track update for an album/playlist // That case is handled separately in updateItemMetadata to ensure we show the right track info const logElement = document.getElementById(`log-${entry.uniqueId}-${entry.prgFile}`) as HTMLElement | null; - if (logElement && status !== 'error' && !(statusData.type === 'track' && statusData.parent && + if (logElement && status !== 'error' && !(statusData.type === 'track' && statusData.parent && (entry.type === 'album' || entry.type === 'playlist'))) { logElement.textContent = message; } - + // Handle real-time progress data for single track downloads if (status === 'real-time') { this.updateRealTimeProgress(entry, statusData); } - + // Handle overall progress for albums and playlists const isMultiTrack = entry.type === 'album' || entry.type === 'playlist'; if (isMultiTrack) { @@ -2064,10 +2064,10 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) // For single tracks, update the track progress this.updateSingleTrackProgress(entry, statusData); } - + // Apply appropriate status classes this.applyStatusClasses(entry, statusData); // Pass statusData instead of status string - + // Special handling for error status based on new API response format if (status === 'error') { entry.hasEnded = true; @@ -2097,7 +2097,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) const errorLogElement = document.getElementById(`log-${entry.uniqueId}-${entry.prgFile}`) as HTMLElement | null; // Use a different variable name if (errorLogElement) { // Check errorLogElement let errorMessageElement = errorLogElement.querySelector('.error-message') as HTMLElement | null; - + if (!errorMessageElement) { // If error UI (message and buttons) is not built yet // Build error UI with manual retry always available errorLogElement.innerHTML = ` @@ -2118,7 +2118,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) this.cleanupEntry(queueId); }); } - + const retryBtnElem = errorLogElement.querySelector('.retry-btn') as HTMLButtonElement | null; if (retryBtnElem) { retryBtnElem.addEventListener('click', 
(e: MouseEvent) => { // Add type for e @@ -2135,9 +2135,9 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) // Auto cleanup after 15s - only set this timeout once when error UI is first built setTimeout(() => { const currentEntryForCleanup = this.queueEntries[queueId]; - if (currentEntryForCleanup && - currentEntryForCleanup.hasEnded && - currentEntryForCleanup.lastStatus?.status === 'error' && + if (currentEntryForCleanup && + currentEntryForCleanup.hasEnded && + currentEntryForCleanup.lastStatus?.status === 'error' && !currentEntryForCleanup.isRetrying) { this.cleanupEntry(queueId); } @@ -2150,13 +2150,13 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } } } - + // Handle terminal states for non-error cases if (['complete', 'cancel', 'cancelled', 'done', 'skipped'].includes(status)) { entry.hasEnded = true; this.handleDownloadCompletion(entry, queueId, statusData); } - + // Cache the status for potential page reloads this.queueCache[entry.prgFile] = statusData; localStorage.setItem("downloadQueueCache", JSON.stringify(this.queueCache)); @@ -2166,11 +2166,11 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) updateItemMetadata(entry: QueueEntry, statusData: StatusData, data: StatusData) { // Add types const titleEl = entry.element.querySelector('.title') as HTMLElement | null; const artistEl = entry.element.querySelector('.artist') as HTMLElement | null; - + if (titleEl) { // Check various data sources for a better title let betterTitle: string | null | undefined = null; - + // First check the statusData if (statusData.song) { betterTitle = statusData.song; @@ -2189,7 +2189,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } else if (data.display_title) { betterTitle = data.display_title; } - + // Update title if we found a better one if (betterTitle && betterTitle !== titleEl.textContent) { titleEl.textContent = betterTitle; @@ -2197,7 +2197,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) entry.item.name = betterTitle; } } - + // Update artist if available if (artistEl) { let artist = statusData.artist || data.display_artist || ''; @@ -2208,19 +2208,19 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } } } - + // Update real-time progress for track downloads updateRealTimeProgress(entry: QueueEntry, statusData: StatusData) { // Add types // Get track progress bar const trackProgressBar = entry.element.querySelector('#track-progress-bar-' + entry.uniqueId + '-' + entry.prgFile) as HTMLElement | null; const timeElapsedEl = entry.element.querySelector('#time-elapsed-' + entry.uniqueId + '-' + entry.prgFile) as HTMLElement | null; - + if (trackProgressBar && statusData.progress !== undefined) { // Update track progress bar const progress = parseFloat(statusData.progress as string); // Cast to string trackProgressBar.style.width = `${progress}%`; trackProgressBar.setAttribute('aria-valuenow', progress.toString()); // Use string - + // Add success class when complete if (progress >= 100) { trackProgressBar.classList.add('complete'); @@ -2228,17 +2228,17 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) trackProgressBar.classList.remove('complete'); } } - + // Display time elapsed if available if (timeElapsedEl && statusData.time_elapsed !== undefined) { const seconds = Math.floor(statusData.time_elapsed / 1000); - const formattedTime = seconds < 60 - ? 
`${seconds}s` + const formattedTime = seconds < 60 + ? `${seconds}s` : `${Math.floor(seconds / 60)}m ${seconds % 60}s`; timeElapsedEl.textContent = formattedTime; } } - + // Update progress for single track downloads updateSingleTrackProgress(entry: QueueEntry, statusData: StatusData) { // Add types // Get track progress bar and other UI elements @@ -2247,16 +2247,16 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) const titleElement = entry.element.querySelector('.title') as HTMLElement | null; const artistElement = entry.element.querySelector('.artist') as HTMLElement | null; let progress = 0; // Declare progress here - + // If this track has a parent, this is actually part of an album/playlist // We should update the entry type and handle it as a multi-track download if (statusData.parent && (statusData.parent.type === 'album' || statusData.parent.type === 'playlist')) { // Store parent info entry.parentInfo = statusData.parent; - + // Update entry type to match parent type entry.type = statusData.parent.type; - + // Update UI to reflect the parent type const typeEl = entry.element.querySelector('.type') as HTMLElement | null; if (typeEl) { @@ -2265,7 +2265,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) // Update type class without triggering animation typeEl.className = `type ${entry.type}`; } - + // Update title and subtitle based on parent type if (statusData.parent.type === 'album') { if (titleElement) titleElement.textContent = statusData.parent.title || 'Unknown album'; @@ -2274,39 +2274,39 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) if (titleElement) titleElement.textContent = statusData.parent.name || 'Unknown playlist'; if (artistElement) artistElement.textContent = statusData.parent.owner || 'Unknown creator'; } - + // Now delegate to the multi-track progress updater this.updateMultiTrackProgress(entry, statusData); return; } - + // For standalone tracks (without parent), update title and subtitle if (!statusData.parent && statusData.song && titleElement) { titleElement.textContent = statusData.song; } - + if (!statusData.parent && statusData.artist && artistElement) { artistElement.textContent = statusData.artist; } - + // For individual track downloads, show the parent context if available if (!['done', 'complete', 'error', 'skipped'].includes(statusData.status || '')) { // Add null check // First check if we have parent data in the current status update if (statusData.parent && logElement) { // Store parent info in the entry for persistence across refreshes entry.parentInfo = statusData.parent; - + let infoText = ''; if (statusData.parent.type === 'album') { infoText = `From album: "${statusData.parent.title}"`; } else if (statusData.parent.type === 'playlist') { infoText = `From playlist: "${statusData.parent.name}" by ${statusData.parent.owner}`; } - + if (infoText) { logElement.textContent = infoText; } - } + } // If no parent in current update, use stored parent info if available else if (entry.parentInfo && logElement) { let infoText = ''; @@ -2315,16 +2315,16 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } else if (entry.parentInfo.type === 'playlist') { infoText = `From playlist: "${entry.parentInfo.name}" by ${entry.parentInfo.owner}`; } - + if (infoText) { logElement.textContent = infoText; } } } - + // Calculate progress based on available data progress = 0; - + // Real-time progress for direct track download if 
(statusData.status === 'real-time' && statusData.progress !== undefined) { progress = parseFloat(statusData.progress as string); // Cast to string @@ -2338,21 +2338,21 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) // If we don't have real-time progress but do have track position progress = (parseInt(statusData.current_track as string, 10) / parseInt(statusData.total_tracks as string, 10)) * 100; // Cast to string } - + // Update track progress bar if available if (trackProgressBar) { // Ensure numeric progress and prevent NaN const safeProgress = isNaN(progress) ? 0 : Math.max(0, Math.min(100, progress)); - + trackProgressBar.style.width = `${safeProgress}%`; trackProgressBar.setAttribute('aria-valuenow', safeProgress.toString()); // Use string - + // Make sure progress bar is visible const trackProgressContainer = entry.element.querySelector('#track-progress-container-' + entry.uniqueId + '-' + entry.prgFile) as HTMLElement | null; if (trackProgressContainer) { trackProgressContainer.style.display = 'block'; } - + // Add success class when complete if (safeProgress >= 100) { trackProgressBar.classList.add('complete'); @@ -2361,7 +2361,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } } } - + // Update progress for multi-track downloads (albums and playlists) updateMultiTrackProgress(entry: QueueEntry, statusData: StatusData) { // Add types // Get progress elements @@ -2372,17 +2372,17 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) const titleElement = entry.element.querySelector('.title') as HTMLElement | null; const artistElement = entry.element.querySelector('.artist') as HTMLElement | null; let progress = 0; // Declare progress here for this function's scope - + // Initialize track progress variables let currentTrack = 0; let totalTracks = 0; let trackProgress = 0; - + // Handle track-level updates for album/playlist downloads - if (statusData.type === 'track' && statusData.parent && + if (statusData.type === 'track' && statusData.parent && (entry.type === 'album' || entry.type === 'playlist')) { console.log('Processing track update for multi-track download:', statusData); - + // Update parent title/artist for album if (entry.type === 'album' && statusData.parent.type === 'album') { if (titleElement && statusData.parent.title) { @@ -2391,7 +2391,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) if (artistElement && statusData.parent.artist) { artistElement.textContent = statusData.parent.artist; } - } + } // Update parent title/owner for playlist else if (entry.type === 'playlist' && statusData.parent.type === 'playlist') { if (titleElement && statusData.parent.name) { @@ -2401,31 +2401,31 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) artistElement.textContent = statusData.parent.owner; } } - + // Get current track and total tracks from the status data if (statusData.current_track !== undefined) { currentTrack = parseInt(String(statusData.current_track), 10); - + // Get total tracks - try from statusData first, then from parent if (statusData.total_tracks !== undefined) { totalTracks = parseInt(String(statusData.total_tracks), 10); } else if (statusData.parent && statusData.parent.total_tracks !== undefined) { totalTracks = parseInt(String(statusData.parent.total_tracks), 10); } - + console.log(`Track info: ${currentTrack}/${totalTracks}`); } - + // Get track progress for real-time updates if 
(statusData.status === 'real-time' && statusData.progress !== undefined) { trackProgress = parseFloat(statusData.progress as string); // Cast to string } - + // Update the track progress counter display if (progressCounter && totalTracks > 0) { progressCounter.textContent = `${currentTrack}/${totalTracks}`; } - + // Update the status message to show current track if (logElement && statusData.song && statusData.artist) { let progressInfo = ''; @@ -2434,7 +2434,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } logElement.textContent = `Currently downloading: ${statusData.song} by ${statusData.artist} (${currentTrack}/${totalTracks}${progressInfo})`; } - + // Calculate and update the overall progress bar if (totalTracks > 0) { let overallProgress = 0; @@ -2448,20 +2448,20 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) overallProgress = (currentTrack / totalTracks) * 100; console.log(`Overall progress (non-real-time): ${overallProgress.toFixed(2)}% (Track ${currentTrack}/${totalTracks})`); } - + // Update the progress bar if (overallProgressBar) { const safeProgress = Math.max(0, Math.min(100, overallProgress)); overallProgressBar.style.width = `${safeProgress}%`; overallProgressBar.setAttribute('aria-valuenow', safeProgress.toString()); // Use string - + if (safeProgress >= 100) { overallProgressBar.classList.add('complete'); } else { overallProgressBar.classList.remove('complete'); } } - + // Update the track-level progress bar if (trackProgressBar) { // Make sure progress bar container is visible @@ -2469,14 +2469,14 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) if (trackProgressContainer) { trackProgressContainer.style.display = 'block'; } - + if (statusData.status === 'real-time') { // Real-time progress for the current track const safeTrackProgress = Math.max(0, Math.min(100, trackProgress)); trackProgressBar.style.width = `${safeTrackProgress}%`; trackProgressBar.setAttribute('aria-valuenow', safeTrackProgress.toString()); // Use string trackProgressBar.classList.add('real-time'); - + if (safeTrackProgress >= 100) { trackProgressBar.classList.add('complete'); } else { @@ -2489,14 +2489,14 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) trackProgressBar.setAttribute('aria-valuenow', "50"); // Use string } } - + // Store progress for potential later use entry.progress = overallProgress; } - + return; // Skip the standard handling below } - + // Standard handling for album/playlist direct updates (not track-level): // Update title and subtitle based on item type if (entry.type === 'album') { @@ -2514,7 +2514,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) artistElement.textContent = statusData.owner; } } - + // Extract track counting data from status data if (statusData.current_track && statusData.total_tracks) { currentTrack = parseInt(statusData.current_track as string, 10); // Cast to string @@ -2528,7 +2528,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) currentTrack = parseInt(parts[0], 10); totalTracks = parseInt(parts[1], 10); } - + // Get track progress for real-time downloads if (statusData.status === 'real-time' && statusData.progress !== undefined) { // For real-time downloads, progress comes as a percentage value (0-100) @@ -2545,12 +2545,12 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) // If we don't have real-time 
progress but do have track position progress = (parseInt(statusData.current_track as string, 10) / parseInt(statusData.total_tracks as string, 10)) * 100; // Cast to string } - + // Update progress counter if available if (progressCounter && totalTracks > 0) { progressCounter.textContent = `${currentTrack}/${totalTracks}`; } - + // Calculate overall progress let overallProgress = 0; if (totalTracks > 0) { @@ -2566,14 +2566,14 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } else { overallProgress = 0; } - + // Update overall progress bar if (overallProgressBar) { // Ensure progress is between 0-100 const safeProgress = Math.max(0, Math.min(100, overallProgress)); overallProgressBar.style.width = `${safeProgress}%`; overallProgressBar.setAttribute('aria-valuenow', String(safeProgress)); - + // Add success class when complete if (safeProgress >= 100) { overallProgressBar.classList.add('complete'); @@ -2581,7 +2581,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) overallProgressBar.classList.remove('complete'); } } - + // Update track progress bar for current track in multi-track items if (trackProgressBar) { // Make sure progress bar container is visible @@ -2589,7 +2589,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) if (trackProgressContainer) { trackProgressContainer.style.display = 'block'; } - + if (statusData.status === 'real-time' || statusData.status === 'real_time') { // For real-time updates, use the track progress for the small green progress bar // This shows download progress for the current track only @@ -2597,7 +2597,7 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) trackProgressBar.style.width = `${safeProgress}%`; trackProgressBar.setAttribute('aria-valuenow', String(safeProgress)); trackProgressBar.classList.add('real-time'); - + if (safeProgress >= 100) { trackProgressBar.classList.add('complete'); } else { @@ -2617,12 +2617,12 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) trackProgressBar.setAttribute('aria-valuenow', String(trackPositionPercent)); } } - + // Store the progress in the entry for potential later use entry.progress = overallProgress; } } - + /* Close all active polling intervals */ clearAllPollingIntervals() { for (const queueId in this.pollingIntervals) { @@ -2752,4 +2752,4 @@ createQueueItem(item: QueueItem, type: string, prgFile: string, queueId: string) } // Singleton instance -export const downloadQueue = new DownloadQueue(); \ No newline at end of file +export const downloadQueue = new DownloadQueue(); \ No newline at end of file diff --git a/src/js/track.ts b/src/js/track.ts index e5cac4d..08b3a7c 100644 --- a/src/js/track.ts +++ b/src/js/track.ts @@ -83,7 +83,7 @@ function renderTrack(track: any) { // Show placeholder for explicit content const loadingElExplicit = document.getElementById('loading'); if (loadingElExplicit) loadingElExplicit.classList.add('hidden'); - + const placeholderContent = `

Explicit Content Filtered

@@ -91,13 +91,13 @@ function renderTrack(track: any) {

The explicit content filter is controlled by environment variables.

    `;
-
+
    const contentContainer = document.getElementById('track-header');
    if (contentContainer) {
      contentContainer.innerHTML = placeholderContent;
      contentContainer.classList.remove('hidden');
    }
-
+
    return; // Stop rendering the actual track content
  }
@@ -107,7 +107,7 @@ function renderTrack(track: any) {
    trackNameEl.innerHTML = `
${track.name || 'Unknown Track'}`; } - + const trackArtistEl = document.getElementById('track-artist'); if (trackArtistEl) { trackArtistEl.innerHTML = @@ -115,19 +115,19 @@ function renderTrack(track: any) { `${a?.name || 'Unknown Artist'}` ).join(', ') || 'Unknown Artist'}`; } - + const trackAlbumEl = document.getElementById('track-album'); if (trackAlbumEl) { trackAlbumEl.innerHTML = `Album: ${track.album?.name || 'Unknown Album'} (${track.album?.album_type || 'album'})`; } - + const trackDurationEl = document.getElementById('track-duration'); if (trackDurationEl) { trackDurationEl.textContent = `Duration: ${msToTime(track.duration_ms || 0)}`; } - + const trackExplicitEl = document.getElementById('track-explicit'); if (trackExplicitEl) { trackExplicitEl.textContent = @@ -178,7 +178,7 @@ function renderTrack(track: any) { downloadBtn.addEventListener('click', () => { downloadBtn.disabled = true; downloadBtn.innerHTML = `Queueing...`; - + const trackUrl = track.external_urls?.spotify || ''; if (!trackUrl) { showError('Missing track URL'); @@ -193,7 +193,7 @@ function renderTrack(track: any) { downloadBtn.innerHTML = `Download`; return; } - + // Use the centralized downloadQueue.download method downloadQueue.download(trackIdToDownload, 'track', { name: track.name || 'Unknown Track', artist: track.artists?.[0]?.name }) .then(() => { @@ -219,7 +219,7 @@ function renderTrack(track: any) { */ function msToTime(duration: number) { if (!duration || isNaN(duration)) return '0:00'; - + const minutes = Math.floor(duration / 60000); const seconds = Math.floor((duration % 60000) / 1000); return `${minutes}:${seconds.toString().padStart(2, '0')}`; @@ -244,11 +244,11 @@ async function startDownload(itemId: string, type: string, item: any) { showError('Missing ID or type for download'); return; } - + try { // Use the centralized downloadQueue.download method await downloadQueue.download(itemId, type, item); - + // Make the queue visible after queueing downloadQueue.toggleVisibility(true); } catch (error: any) { diff --git a/src/js/watch.ts b/src/js/watch.ts index d2710cf..0dfabfb 100644 --- a/src/js/watch.ts +++ b/src/js/watch.ts @@ -28,9 +28,9 @@ interface WatchedPlaylistOwner { // Kept as is, used by PlaylistFromWatchList interface PlaylistFromWatchList { spotify_id: string; // Changed from id to spotify_id name: string; - owner?: WatchedPlaylistOwner; + owner?: WatchedPlaylistOwner; images?: Image[]; // Ensure images can be part of this initial fetch - total_tracks?: number; + total_tracks?: number; } // New interface for playlists after initial processing (spotify_id mapped to id) @@ -97,8 +97,8 @@ type FinalCardItem = FinalArtistCardItem | FinalPlaylistCardItem; // The type for items initially fetched from /watch/list, before detailed processing // Updated to use ProcessedArtistFromWatchList for artists and ProcessedPlaylistFromWatchList for playlists -type InitialWatchedItem = - (ProcessedArtistFromWatchList & { itemType: 'artist' }) | +type InitialWatchedItem = + (ProcessedArtistFromWatchList & { itemType: 'artist' }) | (ProcessedPlaylistFromWatchList & { itemType: 'playlist' }); // Interface for a settled promise (fulfilled) @@ -260,7 +260,7 @@ document.addEventListener('DOMContentLoaded', async function() { // Ensure the main loading indicator is also hidden if it was shown by default if (loadingIndicator) loadingIndicator.classList.add('hidden'); } -}); +}); const MAX_NOTIFICATIONS = 3; @@ -287,15 +287,15 @@ async function loadWatchedItems() { const playlists: PlaylistFromWatchList[] = await 
playlistsResponse.json();
    const initialItems: InitialWatchedItem[] = [
-        ...artists.map(artist => ({ 
-            ...artist, 
+        ...artists.map(artist => ({
+            ...artist,
            id: artist.spotify_id, // Map spotify_id to id for artists
-            itemType: 'artist' as const 
+            itemType: 'artist' as const
        })),
-        ...playlists.map(playlist => ({ 
-            ...playlist, 
+        ...playlists.map(playlist => ({
+            ...playlist,
            id: playlist.spotify_id, // Map spotify_id to id for playlists
-            itemType: 'playlist' as const 
+            itemType: 'playlist' as const
        }))
    ];
@@ -374,7 +374,7 @@ async function loadWatchedItems() {
    // Simulating Promise.allSettled behavior for compatibility
    const settledResults: CustomSettledPromiseResult[] = await Promise.all(
-        detailedItemPromises.map(p => 
+        detailedItemPromises.map(p =>
            p.then(value => ({ status: 'fulfilled', value } as CustomPromiseFulfilledResult))
             .catch(reason => ({ status: 'rejected', reason } as CustomPromiseRejectedResult))
        )
@@ -510,7 +510,7 @@ function createWatchedItemCard(item: FinalCardItem): HTMLDivElement {
        }
    }
-    cardElement.innerHTML = ` 
+    cardElement.innerHTML = `
${item.name}
@@ -531,7 +531,7 @@ function createWatchedItemCard(item: FinalCardItem): HTMLDivElement {
     cardElement.addEventListener('click', (e: MouseEvent) => {
         const target = e.target as HTMLElement;
         // Don't navigate if any button within the card was clicked
-        if (target.closest('button')) {
+        if (target.closest('button')) {
            return;
         }
         window.location.href = `/${item.itemType}/${item.id}`;
@@ -541,7 +541,7 @@ function createWatchedItemCard(item: FinalCardItem): HTMLDivElement {
     const checkNowBtn = cardElement.querySelector('.check-item-now-btn') as HTMLButtonElement | null;
     if (checkNowBtn) {
         checkNowBtn.addEventListener('click', (e: MouseEvent) => {
-            e.stopPropagation();
+            e.stopPropagation();
             const itemId = checkNowBtn.dataset.id;
             const itemType = checkNowBtn.dataset.type as 'artist' | 'playlist';
             if (itemId && itemType) {
@@ -591,7 +591,7 @@ async function unwatchItem(itemId: string, itemType: 'artist' | 'playlist', butt
         }
         const result = await response.json();
         showNotification(result.message || `${itemType.charAt(0).toUpperCase() + itemType.slice(1)} unwatched successfully.`);
-
+
         cardElement.style.transition = 'opacity 0.5s ease, transform 0.5s ease';
         cardElement.style.opacity = '0';
         cardElement.style.transform = 'scale(0.9)';
@@ -614,7 +614,7 @@ async function unwatchItem(itemId: string, itemType: 'artist' | 'playlist', butt
             if (totalItemsLeft === 0) {
                 // If all items are gone (either from groups or directly), reload to show empty state.
                 // This also correctly handles the case where the initial list had <= 8 items.
-                loadWatchedItems();
+                loadWatchedItems();
             }
         }, 500);
@@ -632,7 +632,7 @@ async function triggerItemCheck(itemId: string, itemType: 'artist' | 'playlist',
     buttonElement.disabled = true;
     // Keep the icon, but we can add a class for spinning or use the same icon.
     // For simplicity, just using the same icon. Text "Checking..." is removed.
-    buttonElement.innerHTML = 'Checking...';
+    buttonElement.innerHTML = 'Checking...';
 
     const endpoint = `/api/${itemType}/watch/trigger_check/${itemId}`;
@@ -656,7 +656,7 @@ async function triggerItemCheck(itemId: string, itemType: 'artist' | 'playlist',
 // Helper function to show notifications (can be moved to a shared utility file if used elsewhere)
 function showNotification(message: string, isError: boolean = false) {
     const notificationArea = document.getElementById('notificationArea') || createNotificationArea();
-
+
     // Limit the number of visible notifications
     while (notificationArea.childElementCount >= MAX_NOTIFICATIONS) {
         const oldestNotification = notificationArea.firstChild; // In column-reverse, firstChild is visually the bottom one
@@ -670,9 +670,9 @@ function showNotification(message: string, isError: boolean = false) {
     const notification = document.createElement('div');
     notification.className = `notification-toast ${isError ? 'error' : 'success'}`;
     notification.textContent = message;
-
+
     notificationArea.appendChild(notification);
-
+
     // Auto-remove after 5 seconds
     setTimeout(() => {
         notification.classList.add('hide');
@@ -685,4 +685,4 @@ function createNotificationArea(): HTMLElement {
     area.id = 'notificationArea';
     document.body.appendChild(area);
     return area;
-}
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/static/css/album/album.css b/static/css/album/album.css
index 4f389e3..3f71721 100644
--- a/static/css/album/album.css
+++ b/static/css/album/album.css
@@ -297,7 +297,7 @@ body {
     grid-template-columns: 30px 1fr auto auto;
     padding: 0.6rem 0.8rem;
   }
-
+
   .track-duration {
     margin-right: 0.5rem;
   }
@@ -333,15 +333,15 @@ body {
   .track {
     grid-template-columns: 30px 1fr auto;
   }
-
+
   .track-info {
     padding: 0 0.5rem;
   }
-
+
   .track-name, .track-artist {
     max-width: 200px;
   }
-
+
   .section-title {
     font-size: 1.25rem;
   }
diff --git a/static/css/artist/artist.css b/static/css/artist/artist.css
index 62f6723..3c0a75e 100644
--- a/static/css/artist/artist.css
+++ b/static/css/artist/artist.css
@@ -381,18 +381,18 @@ body {
     align-items: center;
     text-align: center;
   }
-
+
   #artist-image {
     width: 180px;
     height: 180px;
     margin-bottom: 1rem;
   }
-
+
   .track {
     flex-direction: column;
     align-items: center;
   }
-
+
   .track-album,
   .track-duration {
     margin-left: 0;
@@ -400,17 +400,17 @@ body {
     width: 100%;
     text-align: center;
   }
-
+
   .albums-list {
     grid-template-columns: repeat(auto-fill, minmax(160px, 1fr));
     gap: 1rem;
   }
-
+
   .album-group-header {
     flex-direction: column;
     align-items: flex-start;
   }
-
+
   .group-download-btn {
     margin-top: 0.5rem;
   }
@@ -421,28 +421,28 @@ body {
   #app {
     padding: 10px;
   }
-
+
   #artist-name {
     font-size: 1.75rem;
   }
-
+
   .track {
     padding: 0.8rem;
     flex-direction: column;
     align-items: center;
     text-align: center;
   }
-
+
   .track-number {
     font-size: 0.9rem;
     margin-right: 0;
     margin-bottom: 0.5rem;
   }
-
+
   .track-info {
     align-items: center;
   }
-
+
   .track-album,
   .track-duration {
     margin-left: 0;
@@ -450,20 +450,20 @@ body {
     width: 100%;
     text-align: center;
   }
-
+
   .albums-list {
     grid-template-columns: repeat(auto-fill, minmax(130px, 1fr));
     gap: 0.75rem;
   }
-
+
   .album-info {
     padding: 0.5rem;
   }
-
+
   .album-title {
     font-size: 0.9rem;
   }
-
+
   .album-artist {
     font-size: 0.8rem;
   }
@@ -564,15 +564,15 @@ a:focus {
   padding: 8px; /* Spacing around the buttons */
   border-top: 1px solid var(--color-surface-darker, #2a2a2a); /* Separator line */
   /* Ensure it takes up full width of the card if not already */
-  width: 100%;
+  width: 100%;
 }
 
 /* Persistent action button (e.g., toggle known/missing) on album card - BOTTOM-LEFT */
 .persistent-album-action-btn {
   /* position: absolute; */ /* No longer absolute */
-  /* bottom: 8px; */
+  /* bottom: 8px; */
   /* left: 8px; */
-  /* z-index: 2; */
+  /* z-index: 2; */
   opacity: 1; /* Ensure it is visible */
   /* Specific margin if needed, but flexbox space-between should handle it */
   margin: 0; /* Reset any previous margins */
@@ -597,11 +597,11 @@ a:focus {
 
 /* NEW STYLES FOR BUTTON STATES */
 .persistent-album-action-btn.status-missing {
   background-color: #d9534f; /* Bootstrap's btn-danger red */
-  border-color: #d43f3a;
+  border-color: #d43f3a;
 }
 
 .persistent-album-action-btn.status-missing:hover {
-  background-color: #c9302c;
+  background-color: #c9302c;
   border-color: #ac2925;
 }
diff --git a/static/css/config/config.css b/static/css/config/config.css
index 0938b1a..927c32c 100644
--- a/static/css/config/config.css
+++ b/static/css/config/config.css
@@ -69,7 +69,7 @@ body {
 
 #downloadQueue {
   position: fixed;
   top: 0;
-  right: -350px;
+  right: -350px;
   width: 350px;
   height: 100vh;
   background: #181818;
@@ -185,20 +185,20 @@ body {
     right: -100%;
     padding: 15px;
   }
-
+
   #downloadQueue.active {
     right: 0;
   }
-
+
   .sidebar-header {
     padding-bottom: 12px;
     margin-bottom: 15px;
   }
-
+
   .sidebar-header h2 {
     font-size: 1.1rem;
   }
-
+
   #cancelAllBtn {
     padding: 6px 10px;
     font-size: 12px;
@@ -695,40 +695,40 @@ input:checked + .slider:before {
   .config-container {
     padding: 1.5rem 1rem;
   }
-
+
   .config-header {
     flex-direction: column;
     gap: 1rem;
     align-items: flex-start;
   }
-
+
   /* Increase touch target sizes for buttons and selects */
   .form-select {
     padding: 0.8rem 2rem 0.8rem 1rem;
     font-size: 0.9rem;
   }
-
+
   .service-tabs {
     flex-wrap: wrap;
   }
-
+
   .tab-button {
     flex: 1 1 auto;
     text-align: center;
     margin-bottom: 0.5rem;
   }
-
+
   .credential-item {
     flex-direction: column;
     align-items: flex-start;
     gap: 0.75rem;
   }
-
+
   .credential-info {
     width: 100%;
     margin-bottom: 1rem;
   }
-
+
   .credential-actions {
     width: 100%;
     display: flex;
@@ -736,13 +736,13 @@ input:checked + .slider:before {
     flex-wrap: wrap;
     gap: 0.5rem;
   }
-
+
   /* Adjust toggle switch size for better touch support */
   .switch {
     width: 52px;
     height: 26px;
   }
-
+
   .slider:before {
     height: 20px;
     width: 20px;
@@ -753,34 +753,34 @@ input:checked + .slider:before {
   .config-container {
     padding: 1rem;
   }
-
+
   .account-config,
   .credentials-list,
   .credentials-form {
     padding: 1rem;
     border-radius: 8px;
   }
-
+
   .section-title {
     font-size: 1.3rem;
   }
-
+
   .config-item label {
     font-size: 0.95rem;
   }
-
+
   .form-select,
   .form-input {
     padding: 0.7rem 1.8rem 0.7rem 0.8rem;
     font-size: 0.9rem;
   }
-
+
   .save-btn {
     width: 100%;
     padding: 0.7rem;
     font-size: 0.9rem;
   }
-
+
   /* Position floating icons a bit closer to the edges on small screens */
   .back-button.floating-icon {
     width: 60px;
@@ -788,7 +788,7 @@ input:checked + .slider:before {
     left: 16px;
     bottom: 16px;
   }
-
+
   .back-button.floating-icon img {
     width: 28px;
     height: 28px;
@@ -971,8 +971,8 @@ input:checked + .slider:before {
   font-size: 0.95rem;
   cursor: pointer;
   /* Reset some global label styles if they interfere */
-  display: inline;
-  margin-bottom: 0;
+  display: inline;
+  margin-bottom: 0;
 }
 
 /* Urgent Warning Message Style */
diff --git a/static/css/history/history.css b/static/css/history/history.css
index ea4edba..5267b84 100644
--- a/static/css/history/history.css
+++ b/static/css/history/history.css
@@ -118,4 +118,4 @@ tr:nth-child(even) {
 
 .details-btn:hover {
   background-color: #333; /* Darker on hover */
-}
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/static/css/main/base.css b/static/css/main/base.css
index 12c2968..89cab1e 100644
--- a/static/css/main/base.css
+++ b/static/css/main/base.css
@@ -16,12 +16,12 @@
   --color-surface: #1c1c1c;
   --color-surface-hover: #2a2a2a;
   --color-border: #2a2a2a;
-
+
   /* Text colors */
   --color-text-primary: #ffffff;
   --color-text-secondary: #b3b3b3;
   --color-text-tertiary: #757575;
-
+
   /* Brand colors */
   --color-primary: #1db954;
   --color-primary-hover: #17a44b;
@@ -30,19 +30,19 @@
   /* Adding accent green if not present, or ensuring it is */
   --color-accent-green: #22c55e; /* Example: A Tailwind-like green */
   --color-accent-green-dark: #16a34a; /* Darker shade for hover */
-
+
   /* Spacing */
   --space-xs: 0.25rem;
   --space-sm: 0.5rem;
   --space-md: 1rem;
   --space-lg: 1.5rem;
   --space-xl: 2rem;
-
+
   /* Shadow */
   --shadow-sm: 0 1px 3px rgba(0,0,0,0.12), 0 1px 2px rgba(0,0,0,0.24);
   --shadow-md: 0 4px 6px rgba(0,0,0,0.1);
   --shadow-lg: 0 10px 20px rgba(0,0,0,0.19), 0 6px 6px rgba(0,0,0,0.23);
-
+
   /* Border radius */
   --radius-sm: 4px;
   --radius-md: 8px;
@@ -383,16 +383,16 @@ a:hover, a:focus {
     flex-direction: column;
     text-align: center;
   }
-
+
   .header-image {
     width: 150px;
     height: 150px;
   }
-
+
   .header-title {
     font-size: 1.75rem;
   }
-
+
   .track-item {
     grid-template-columns: 30px 1fr auto;
   }
@@ -402,46 +402,46 @@ a:hover, a:focus {
   .app-container {
     padding: var(--space-md);
   }
-
+
   .header-image {
     width: 120px;
     height: 120px;
   }
-
+
   .header-title {
     font-size: 1.5rem;
   }
-
+
   .header-subtitle {
     font-size: 0.9rem;
   }
-
+
   .header-actions {
     flex-direction: column;
     width: 100%;
   }
-
+
   .download-btn {
     width: 100%;
   }
-
+
   /* Adjust floating icons size for very small screens */
   .floating-icon {
     width: 60px;
     height: 60px;
   }
-
+
   .floating-icon img {
     width: 28px;
     height: 28px;
   }
-
+
   /* Position floating icons a bit closer to the edges on small screens */
   .settings-icon {
     left: 16px;
     bottom: 16px;
   }
-
+
   .queue-icon {
     right: 16px;
     bottom: 16px;
@@ -527,4 +527,4 @@ a:hover, a:focus {
   width: 20px;
   height: 20px;
   filter: brightness(0) invert(1);
-}
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/static/css/main/icons.css b/static/css/main/icons.css
index bd0f4f2..71b9d87 100644
--- a/static/css/main/icons.css
+++ b/static/css/main/icons.css
@@ -180,7 +180,7 @@
 }
 
 .back-button { /* Specific to config page */
-  bottom: 20px;
+  bottom: 20px;
 }
 
 /* New History button specific positioning - above other left buttons */
diff --git a/static/css/main/main.css b/static/css/main/main.css
index 5c069c2..6a26d35 100644
--- a/static/css/main/main.css
+++ b/static/css/main/main.css
@@ -101,7 +101,7 @@ body {
   min-width: 100px;
 }
 
-.search-type:hover,
+.search-type:hover,
 .search-type:focus {
   background: var(--color-surface-hover);
 }
@@ -270,22 +270,22 @@ body {
     padding: 15px 0;
     gap: 12px;
   }
-
+
   .search-input-container {
     flex: 1 1 100%;
     order: 1;
   }
-
+
   .search-button {
     order: 2;
     flex: 1;
   }
-
+
   .results-grid {
     grid-template-columns: repeat(auto-fill, minmax(160px, 1fr));
     gap: 15px;
   }
-
+
   /* Smaller download button for mobile */
   .download-btn {
     padding: 0.5rem 0.8rem;
@@ -297,29 +297,29 @@ body {
   .search-header {
     padding: 10px 0;
   }
-
+
   .search-type {
     min-width: 80px;
     padding: 12px 10px;
   }
-
+
   .search-button {
     padding: 12px 15px;
   }
-
+
   .results-grid {
     grid-template-columns: repeat(auto-fill, minmax(140px, 1fr));
     gap: 12px;
   }
-
+
   .track-title,
   .track-artist {
     font-size: 0.9rem;
   }
-
+
   .track-details {
     font-size: 0.8rem;
   }
-
+
   /* Even smaller download button for very small screens */
   .download-btn {
     padding: 0.4rem 0.7rem;
@@ -327,11 +327,11 @@ body {
     margin: 0 0.8rem 0.8rem;
     max-width: calc(100% - 1.6rem);
   }
-
+
   .empty-state h2 {
     font-size: 1.5rem;
   }
-
+
   .empty-state p {
     font-size: 0.9rem;
   }
diff --git a/static/css/playlist/playlist.css b/static/css/playlist/playlist.css
index 7511d07..aa8a0b4 100644
--- a/static/css/playlist/playlist.css
+++ b/static/css/playlist/playlist.css
@@ -291,18 +291,18 @@ body {
     align-items: center;
     text-align: center;
   }
-
+
   #playlist-image {
     width: 180px;
     height: 180px;
     margin-bottom: 1rem;
   }
-
+
   .track {
     flex-direction: column;
     align-items: flex-start;
   }
-
+
   .track-album,
   .track-duration {
     margin-left: 0;
@@ -317,11 +317,11 @@ body {
   #app {
     padding: 10px;
   }
-
+
   #playlist-name {
     font-size: 1.75rem;
   }
-
+
   /* Adjust track layout to vertical & centered */
   .track {
     padding: 0.8rem;
@@ -329,17 +329,17 @@ body {
     align-items: center;
     text-align: center;
   }
-
+
   .track-number {
     font-size: 0.9rem;
     margin-right: 0;
     margin-bottom: 0.5rem;
   }
-
+
   .track-info {
     align-items: center;
   }
-
+
   .track-album,
   .track-duration {
     margin-left: 0;
@@ -443,7 +443,7 @@ a:focus {
   .track {
     grid-template-columns: 40px 1fr auto auto;
   }
-
+
   .track-album {
     display: none;
   }
@@ -453,7 +453,7 @@ a:focus {
   .playlist-description {
     max-width: 100%;
   }
-
+
   #downloadAlbumsBtn {
     margin-top: 0.5rem;
   }
@@ -463,7 +463,7 @@ a:focus {
   .track {
     grid-template-columns: 30px 1fr auto;
   }
-
+
   .playlist-description {
     margin-bottom: 1rem;
   }
diff --git a/static/css/queue/queue.css b/static/css/queue/queue.css
index 4cc80e0..f52c882 100644
--- a/static/css/queue/queue.css
+++ b/static/css/queue/queue.css
@@ -15,7 +15,7 @@
   z-index: 1001;
   /* Remove overflow-y here to delegate scrolling to the queue items container */
   box-shadow: -20px 0 30px rgba(0, 0, 0, 0.4);
-
+
   /* Added for flex layout */
   display: flex;
   flex-direction: column;
@@ -287,7 +287,7 @@
 }
 
 /* Optional state indicators for each queue item */
-.queue-item--complete,
+.queue-item--complete,
 .queue-item.download-success {
   border-left-color: #1DB954;
 }
@@ -689,7 +689,7 @@
     right: -100%; /* Off-screen fully */
     padding: 15px;
   }
-
+
   /* When active, the sidebar slides into view from full width */
   #downloadQueue.active {
     right: 0;
@@ -702,44 +702,44 @@
     padding-bottom: 12px;
     margin-bottom: 15px;
   }
-
+
   .sidebar-header h2 {
     font-size: 1.1rem;
   }
-
+
   /* Reduce the size of the close buttons */
   .close-btn {
     width: 28px;
     height: 28px;
     font-size: 18px;
   }
-
+
   /* Adjust queue items padding */
   .queue-item {
     padding: 12px;
     margin-bottom: 12px;
   }
-
+
   /* Ensure text remains legible on smaller screens */
   .queue-item .log,
   .queue-item .type {
     font-size: 12px;
   }
-
+
   #cancelAllBtn {
     padding: 6px 10px;
     font-size: 12px;
   }
-
+
   .error-buttons {
     flex-direction: row;
   }
-
+
   .close-error-btn {
     width: 28px;
     height: 28px;
   }
-
+
   .retry-btn {
     padding: 6px 12px !important;
   }
diff --git a/static/css/track/track.css b/static/css/track/track.css
index 9ee57bb..f94d9a6 100644
--- a/static/css/track/track.css
+++ b/static/css/track/track.css
@@ -343,7 +343,7 @@ a:focus {
     gap: 0.25rem;
     margin-bottom: 1rem;
   }
-
+
   #track-name a {
     font-size: 1.75rem;
   }
@@ -353,7 +353,7 @@ a:focus {
   #track-name a {
     font-size: 1.5rem;
   }
-
+
   .track-details {
     margin-bottom: 1.5rem;
   }
diff --git a/static/css/watch/watch.css b/static/css/watch/watch.css
index 1a33a65..9ca4546 100644
--- a/static/css/watch/watch.css
+++ b/static/css/watch/watch.css
@@ -298,7 +298,7 @@ body {
 /* Ensure the main watchedItemsContainer still behaves like a grid if there are few items */
 #watchedItemsContainer:not(:has(.watched-items-group)) {
   display: grid;
-  /* Assuming results-grid styles are already defined elsewhere,
+  /* Assuming results-grid styles are already defined elsewhere,
      or copy relevant grid styles here if needed */
 }
 
@@ -308,9 +308,9 @@ body {
   bottom: 20px;
   left: 50%; /* Center horizontally */
   transform: translateX(-50%); /* Adjust for exact centering */
-  z-index: 2000;
+  z-index: 2000;
   display: flex;
-  flex-direction: column-reverse;
+  flex-direction: column-reverse;
   gap: 10px;
   width: auto; /* Allow width to be determined by content */
   max-width: 90%; /* Prevent it from being too wide on large screens */
@@ -356,4 +356,4 @@ body {
 
 .spin-counter-clockwise {
   animation: spin-counter-clockwise 1s linear infinite;
-}
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/static/html/album.html b/static/html/album.html
index ba32c49..a7d8797 100644
--- a/static/html/album.html
+++ b/static/html/album.html
@@ -15,14 +15,14 @@
-
+