complete overhaul with pre-commit hooks

Mustafa Soylu
2025-06-07 18:47:18 +02:00
parent 3971dba9bf
commit 62cbeeb513
71 changed files with 4200 additions and 2820 deletions
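The hunks below are consistent with an automated lint/format pass (double quotes, wrapped call arguments, trailing commas, removed unused imports). The hook configuration the commit title refers to is not visible in the hunks shown here; as a rough, assumed sketch only, a ruff-based .pre-commit-config.yaml for this kind of overhaul could look like:

# Hypothetical sketch -- the commit's actual pre-commit configuration is not shown in this diff.
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.4.4  # assumed revision pin
    hooks:
      - id: ruff          # lint pass (e.g., removes unused imports such as "import os")
        args: [--fix]
      - id: ruff-format   # formatting pass (quote style, wrapped arguments, trailing commas)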

app.py
View File

@@ -251,8 +251,8 @@ if __name__ == "__main__":
    # Set file permissions for log files if needed
    try:
        os.chmod(log_handler.baseFilename, 0o666)
    except (OSError, FileNotFoundError) as e:
        logging.warning(f"Could not set permissions on log file: {str(e)}")
    # Log application startup
    logging.info("=== Spotizerr Application Starting ===")

View File

@@ -3,22 +3,26 @@ import atexit
# Configure basic logging for the application if not already configured
# This is a good place for it if routes are a central part of your app structure.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

try:
    from routes.utils.watch.manager import start_watch_manager, stop_watch_manager

    # Start the playlist watch manager when the application/blueprint is initialized
    start_watch_manager()
    # Register the stop function to be called on application exit
    atexit.register(stop_watch_manager)
    logger.info("Playlist Watch Manager initialized and registered for shutdown.")
except ImportError as e:
    logger.error(
        f"Could not import or start Playlist Watch Manager: {e}. Playlist watching will be disabled."
    )
except Exception as e:
    logger.error(
        f"An unexpected error occurred during Playlist Watch Manager setup: {e}",
        exc_info=True,
    )

from .artist import artist_bp
from .prgs import prgs_bp

View File

@@ -1,6 +1,5 @@
from flask import Blueprint, Response, request
import json
import traceback
import uuid
import time
@@ -8,9 +7,10 @@ from routes.utils.celery_queue_manager import download_queue_manager
from routes.utils.celery_tasks import store_task_info, store_task_status, ProgressState
from routes.utils.get_info import get_spotify_info

album_bp = Blueprint("album", __name__)


@album_bp.route("/download/<album_id>", methods=["GET"])
def handle_download(album_id):
    # Retrieve essential parameters from the request.
    # name = request.args.get('name')
@@ -22,21 +22,33 @@ def handle_download(album_id):
    # Fetch metadata from Spotify
    try:
        album_info = get_spotify_info(album_id, "album")
        if (
            not album_info
            or not album_info.get("name")
            or not album_info.get("artists")
        ):
            return Response(
                json.dumps(
                    {"error": f"Could not retrieve metadata for album ID: {album_id}"}
                ),
                status=404,
                mimetype="application/json",
            )

        name_from_spotify = album_info.get("name")
        artist_from_spotify = (
            album_info["artists"][0].get("name")
            if album_info["artists"]
            else "Unknown Artist"
        )
    except Exception as e:
        return Response(
            json.dumps(
                {"error": f"Failed to fetch metadata for album {album_id}: {str(e)}"}
            ),
            status=500,
            mimetype="application/json",
        )

    # Validate required parameters
@@ -44,7 +56,7 @@ def handle_download(album_id):
        return Response(
            json.dumps({"error": "Missing required parameter: url"}),
            status=400,
            mimetype="application/json",
        )

    # Add the task to the queue with only essential parameters
@@ -53,98 +65,97 @@ def handle_download(album_id):
    orig_params = request.args.to_dict()
    orig_params["original_url"] = request.url
    try:
        task_id = download_queue_manager.add_task(
            {
                "download_type": "album",
                "url": url,
                "name": name_from_spotify,
                "artist": artist_from_spotify,
                "orig_request": orig_params,
            }
        )
    except Exception as e:
        # Generic error handling for other issues during task submission
        # Create an error task ID if add_task itself fails before returning an ID
        error_task_id = str(uuid.uuid4())
        store_task_info(
            error_task_id,
            {
                "download_type": "album",
                "url": url,
                "name": name_from_spotify,
                "artist": artist_from_spotify,
                "original_request": orig_params,
                "created_at": time.time(),
                "is_submission_error_task": True,
            },
        )
        store_task_status(
            error_task_id,
            {
                "status": ProgressState.ERROR,
                "error": f"Failed to queue album download: {str(e)}",
                "timestamp": time.time(),
            },
        )
        return Response(
            json.dumps(
                {
                    "error": f"Failed to queue album download: {str(e)}",
                    "task_id": error_task_id,
                }
            ),
            status=500,
            mimetype="application/json",
        )

    return Response(
        json.dumps({"prg_file": task_id}), status=202, mimetype="application/json"
    )


@album_bp.route("/download/cancel", methods=["GET"])
def cancel_download():
    """
    Cancel a running download process by its prg file name.
    """
    prg_file = request.args.get("prg_file")
    if not prg_file:
        return Response(
            json.dumps({"error": "Missing process id (prg_file) parameter"}),
            status=400,
            mimetype="application/json",
        )

    # Use the queue manager's cancellation method.
    result = download_queue_manager.cancel_task(prg_file)
    status_code = 200 if result.get("status") == "cancelled" else 404

    return Response(json.dumps(result), status=status_code, mimetype="application/json")


@album_bp.route("/info", methods=["GET"])
def get_album_info():
    """
    Retrieve Spotify album metadata given a Spotify album ID.
    Expects a query parameter 'id' that contains the Spotify album ID.
    """
    spotify_id = request.args.get("id")
    if not spotify_id:
        return Response(
            json.dumps({"error": "Missing parameter: id"}),
            status=400,
            mimetype="application/json",
        )

    try:
        # Import and use the get_spotify_info function from the utility module.
        from routes.utils.get_info import get_spotify_info

        album_info = get_spotify_info(spotify_id, "album")
        return Response(json.dumps(album_info), status=200, mimetype="application/json")
    except Exception as e:
        error_data = {"error": str(e), "traceback": traceback.format_exc()}
        return Response(json.dumps(error_data), status=500, mimetype="application/json")

View File

@@ -1,13 +1,10 @@
"""
Artist endpoint blueprint.
"""

from flask import Blueprint, Response, request, jsonify
import json
import traceback
from routes.utils.artist import download_artist_albums

# Imports for merged watch functionality
@@ -20,22 +17,23 @@ from routes.utils.watch.db import (
    get_watched_artists,
    add_specific_albums_to_artist_table,
    remove_specific_albums_from_artist_table,
    is_album_in_artist_db,
)
from routes.utils.watch.manager import check_watched_artists, get_watch_config
from routes.utils.get_info import get_spotify_info

artist_bp = Blueprint("artist", __name__, url_prefix="/api/artist")

# Existing log_json can be used, or a logger instance.
# Let's initialize a logger for consistency with merged code.
logger = logging.getLogger(__name__)


def log_json(message_dict):
    print(json.dumps(message_dict))


@artist_bp.route("/download/<artist_id>", methods=["GET"])
def handle_artist_download(artist_id):
    """
    Enqueues album download tasks for the given artist.
@@ -46,14 +44,14 @@ def handle_artist_download(artist_id):
    url = f"https://open.spotify.com/artist/{artist_id}"

    # Retrieve essential parameters from the request.
    album_type = request.args.get("album_type", "album,single,compilation")

    # Validate required parameters
    if not url:  # This check is mostly for safety, as url is constructed
        return Response(
            json.dumps({"error": "Missing required parameter: url"}),
            status=400,
            mimetype="application/json",
        )

    try:
@@ -62,39 +60,41 @@ def handle_artist_download(artist_id):
        # Delegate to the download_artist_albums function which will handle album filtering
        successfully_queued_albums, duplicate_albums = download_artist_albums(
            url=url, album_type=album_type, request_args=request.args.to_dict()
        )

        # Return the list of album task IDs.
        response_data = {
            "status": "complete",
            "message": f"Artist discography processing initiated. {len(successfully_queued_albums)} albums queued.",
            "queued_albums": successfully_queued_albums,
        }
        if duplicate_albums:
            response_data["duplicate_albums"] = duplicate_albums
            response_data["message"] += (
                f" {len(duplicate_albums)} albums were already in progress or queued."
            )

        return Response(
            json.dumps(response_data),
            status=202,  # Still 202 Accepted as some operations may have succeeded
            mimetype="application/json",
        )
    except Exception as e:
        return Response(
            json.dumps(
                {
                    "status": "error",
                    "message": str(e),
                    "traceback": traceback.format_exc(),
                }
            ),
            status=500,
            mimetype="application/json",
        )


@artist_bp.route("/download/cancel", methods=["GET"])
def cancel_artist_download():
    """
    Cancelling an artist download is not supported since the endpoint only enqueues album tasks.
@@ -103,23 +103,23 @@ def cancel_artist_download():
    return Response(
        json.dumps({"error": "Artist download cancellation is not supported."}),
        status=400,
        mimetype="application/json",
    )


@artist_bp.route("/info", methods=["GET"])
def get_artist_info():
    """
    Retrieves Spotify artist metadata given a Spotify artist ID.
    Expects a query parameter 'id' with the Spotify artist ID.
    """
    spotify_id = request.args.get("id")
    if not spotify_id:
        return Response(
            json.dumps({"error": "Missing parameter: id"}),
            status=400,
            mimetype="application/json",
        )

    try:
@@ -127,36 +127,37 @@ def get_artist_info():
        # If artist_info is successfully fetched (it contains album items),
        # check if the artist is watched and augment album items with is_locally_known status
        if artist_info and artist_info.get("items"):
            watched_artist_details = get_watched_artist(
                spotify_id
            )  # spotify_id is the artist ID
            if watched_artist_details:  # Artist is being watched
                for album_item in artist_info["items"]:
                    if album_item and album_item.get("id"):
                        album_id = album_item["id"]
                        album_item["is_locally_known"] = is_album_in_artist_db(
                            spotify_id, album_id
                        )
                    elif album_item:  # Album object exists but no ID
                        album_item["is_locally_known"] = False
            # If not watched, or no albums, is_locally_known will not be added.
            # Frontend should handle absence of this key as false.

        return Response(
            json.dumps(artist_info), status=200, mimetype="application/json"
        )
    except Exception as e:
        return Response(
            json.dumps({"error": str(e), "traceback": traceback.format_exc()}),
            status=500,
            mimetype="application/json",
        )


# --- Merged Artist Watch Routes ---


@artist_bp.route("/watch/<string:artist_spotify_id>", methods=["PUT"])
def add_artist_to_watchlist(artist_spotify_id):
    """Adds an artist to the watchlist."""
    watch_config = get_watch_config()
@@ -166,31 +167,60 @@ def add_artist_to_watchlist(artist_spotify_id):
    logger.info(f"Attempting to add artist {artist_spotify_id} to watchlist.")
    try:
        if get_watched_artist(artist_spotify_id):
            return jsonify(
                {"message": f"Artist {artist_spotify_id} is already being watched."}
            ), 200

        # This call returns an album list-like structure based on logs
        artist_album_list_data = get_spotify_info(
            artist_spotify_id, "artist_discography"
        )

        # Check if we got any data and if it has items
        if not artist_album_list_data or not isinstance(
            artist_album_list_data.get("items"), list
        ):
            logger.error(
                f"Could not fetch album list details for artist {artist_spotify_id} from Spotify using get_spotify_info('artist_discography'). Data: {artist_album_list_data}"
            )
            return jsonify(
                {
                    "error": f"Could not fetch sufficient details for artist {artist_spotify_id} to initiate watch."
                }
            ), 404

        # Attempt to extract artist name and verify ID
        # The actual artist name might be consistently found in the items, if they exist
        artist_name_from_albums = "Unknown Artist"  # Default
        if artist_album_list_data["items"]:
            first_album = artist_album_list_data["items"][0]
            if (
                first_album
                and isinstance(first_album.get("artists"), list)
                and first_album["artists"]
            ):
                # Find the artist in the list that matches the artist_spotify_id
                found_artist = next(
                    (
                        art
                        for art in first_album["artists"]
                        if art.get("id") == artist_spotify_id
                    ),
                    None,
                )
                if found_artist and found_artist.get("name"):
                    artist_name_from_albums = found_artist["name"]
                elif first_album["artists"][0].get(
                    "name"
                ):  # Fallback to first artist if specific match not found or no ID
                    artist_name_from_albums = first_album["artists"][0]["name"]
                    logger.warning(
                        f"Could not find exact artist ID {artist_spotify_id} in first album's artists list. Using name '{artist_name_from_albums}'."
                    )
        else:
            logger.warning(
                f"No album items found for artist {artist_spotify_id} to extract name. Using default."
            )

        # Construct the artist_data object expected by add_artist_db
        # We use the provided artist_spotify_id as the primary ID.
@@ -198,20 +228,29 @@ def add_artist_to_watchlist(artist_spotify_id):
        artist_data_for_db = {
            "id": artist_spotify_id,  # This is the crucial part
            "name": artist_name_from_albums,
            "albums": {  # Mimic structure if add_artist_db expects it for total_albums
                "total": artist_album_list_data.get("total", 0)
            },
            # Add any other fields add_artist_db might expect from a true artist object if necessary
        }
        add_artist_db(artist_data_for_db)
        logger.info(
            f"Artist {artist_spotify_id} ('{artist_name_from_albums}') added to watchlist. Their albums will be processed by the watch manager."
        )
        return jsonify(
            {
                "message": f"Artist {artist_spotify_id} added to watchlist. Albums will be processed shortly."
            }
        ), 201
    except Exception as e:
        logger.error(
            f"Error adding artist {artist_spotify_id} to watchlist: {e}", exc_info=True
        )
        return jsonify({"error": f"Could not add artist to watchlist: {str(e)}"}), 500


@artist_bp.route("/watch/<string:artist_spotify_id>/status", methods=["GET"])
def get_artist_watch_status(artist_spotify_id):
    """Checks if a specific artist is being watched."""
    logger.info(f"Checking watch status for artist {artist_spotify_id}.")
@@ -222,10 +261,14 @@ def get_artist_watch_status(artist_spotify_id):
        else:
            return jsonify({"is_watched": False}), 200
    except Exception as e:
        logger.error(
            f"Error checking watch status for artist {artist_spotify_id}: {e}",
            exc_info=True,
        )
        return jsonify({"error": f"Could not check watch status: {str(e)}"}), 500


@artist_bp.route("/watch/<string:artist_spotify_id>", methods=["DELETE"])
def remove_artist_from_watchlist(artist_spotify_id):
    """Removes an artist from the watchlist."""
    watch_config = get_watch_config()
@@ -235,16 +278,26 @@ def remove_artist_from_watchlist(artist_spotify_id):
    logger.info(f"Attempting to remove artist {artist_spotify_id} from watchlist.")
    try:
        if not get_watched_artist(artist_spotify_id):
            return jsonify(
                {"error": f"Artist {artist_spotify_id} not found in watchlist."}
            ), 404

        remove_artist_db(artist_spotify_id)
        logger.info(f"Artist {artist_spotify_id} removed from watchlist successfully.")
        return jsonify(
            {"message": f"Artist {artist_spotify_id} removed from watchlist."}
        ), 200
    except Exception as e:
        logger.error(
            f"Error removing artist {artist_spotify_id} from watchlist: {e}",
            exc_info=True,
        )
        return jsonify(
            {"error": f"Could not remove artist from watchlist: {str(e)}"}
        ), 500


@artist_bp.route("/watch/list", methods=["GET"])
def list_watched_artists_endpoint():
    """Lists all artists currently in the watchlist."""
    try:
@@ -254,101 +307,201 @@ def list_watched_artists_endpoint():
        logger.error(f"Error listing watched artists: {e}", exc_info=True)
        return jsonify({"error": f"Could not list watched artists: {str(e)}"}), 500


@artist_bp.route("/watch/trigger_check", methods=["POST"])
def trigger_artist_check_endpoint():
    """Manually triggers the artist checking mechanism for all watched artists."""
    watch_config = get_watch_config()
    if not watch_config.get("enabled", False):
        return jsonify(
            {
                "error": "Watch feature is currently disabled globally. Cannot trigger check."
            }
        ), 403

    logger.info("Manual trigger for artist check received for all artists.")
    try:
        thread = threading.Thread(target=check_watched_artists, args=(None,))
        thread.start()
        return jsonify(
            {
                "message": "Artist check triggered successfully in the background for all artists."
            }
        ), 202
    except Exception as e:
        logger.error(
            f"Error manually triggering artist check for all: {e}", exc_info=True
        )
        return jsonify(
            {"error": f"Could not trigger artist check for all: {str(e)}"}
        ), 500


@artist_bp.route("/watch/trigger_check/<string:artist_spotify_id>", methods=["POST"])
def trigger_specific_artist_check_endpoint(artist_spotify_id: str):
    """Manually triggers the artist checking mechanism for a specific artist."""
    watch_config = get_watch_config()
    if not watch_config.get("enabled", False):
        return jsonify(
            {
                "error": "Watch feature is currently disabled globally. Cannot trigger check."
            }
        ), 403

    logger.info(
        f"Manual trigger for specific artist check received for ID: {artist_spotify_id}"
    )
    try:
        watched_artist = get_watched_artist(artist_spotify_id)
        if not watched_artist:
            logger.warning(
                f"Trigger specific check: Artist ID {artist_spotify_id} not found in watchlist."
            )
            return jsonify(
                {
                    "error": f"Artist {artist_spotify_id} is not in the watchlist. Add it first."
                }
            ), 404

        thread = threading.Thread(
            target=check_watched_artists, args=(artist_spotify_id,)
        )
        thread.start()
        logger.info(
            f"Artist check triggered in background for specific artist ID: {artist_spotify_id}"
        )
        return jsonify(
            {
                "message": f"Artist check triggered successfully in the background for {artist_spotify_id}."
            }
        ), 202
    except Exception as e:
        logger.error(
            f"Error manually triggering specific artist check for {artist_spotify_id}: {e}",
            exc_info=True,
        )
        return jsonify(
            {
                "error": f"Could not trigger artist check for {artist_spotify_id}: {str(e)}"
            }
        ), 500


@artist_bp.route("/watch/<string:artist_spotify_id>/albums", methods=["POST"])
def mark_albums_as_known_for_artist(artist_spotify_id):
    """Fetches details for given album IDs and adds/updates them in the artist's local DB table."""
    watch_config = get_watch_config()
    if not watch_config.get("enabled", False):
        return jsonify(
            {
                "error": "Watch feature is currently disabled globally. Cannot mark albums."
            }
        ), 403

    logger.info(f"Attempting to mark albums as known for artist {artist_spotify_id}.")
    try:
        album_ids = request.json
        if not isinstance(album_ids, list) or not all(
            isinstance(aid, str) for aid in album_ids
        ):
            return jsonify(
                {
                    "error": "Invalid request body. Expecting a JSON array of album Spotify IDs."
                }
            ), 400

        if not get_watched_artist(artist_spotify_id):
            return jsonify(
                {"error": f"Artist {artist_spotify_id} is not being watched."}
            ), 404

        fetched_albums_details = []
        for album_id in album_ids:
            try:
                # We need full album details. get_spotify_info with type "album" should provide this.
                album_detail = get_spotify_info(album_id, "album")
                if album_detail and album_detail.get("id"):
                    fetched_albums_details.append(album_detail)
                else:
                    logger.warning(
                        f"Could not fetch details for album {album_id} when marking as known for artist {artist_spotify_id}."
                    )
            except Exception as e:
                logger.error(
                    f"Failed to fetch Spotify details for album {album_id}: {e}"
                )

        if not fetched_albums_details:
            return jsonify(
                {
                    "message": "No valid album details could be fetched to mark as known.",
                    "processed_count": 0,
                }
            ), 200

        processed_count = add_specific_albums_to_artist_table(
            artist_spotify_id, fetched_albums_details
        )
        logger.info(
            f"Successfully marked/updated {processed_count} albums as known for artist {artist_spotify_id}."
        )
        return jsonify(
            {
                "message": f"Successfully processed {processed_count} albums for artist {artist_spotify_id}."
            }
        ), 200
    except Exception as e:
        logger.error(
            f"Error marking albums as known for artist {artist_spotify_id}: {e}",
            exc_info=True,
        )
        return jsonify({"error": f"Could not mark albums as known: {str(e)}"}), 500


@artist_bp.route("/watch/<string:artist_spotify_id>/albums", methods=["DELETE"])
def mark_albums_as_missing_locally_for_artist(artist_spotify_id):
    """Removes specified albums from the artist's local DB table."""
    watch_config = get_watch_config()
    if not watch_config.get("enabled", False):
        return jsonify(
            {
                "error": "Watch feature is currently disabled globally. Cannot mark albums."
            }
        ), 403

    logger.info(
        f"Attempting to mark albums as missing (delete locally) for artist {artist_spotify_id}."
    )
    try:
        album_ids = request.json
        if not isinstance(album_ids, list) or not all(
            isinstance(aid, str) for aid in album_ids
        ):
            return jsonify(
                {
                    "error": "Invalid request body. Expecting a JSON array of album Spotify IDs."
                }
            ), 400

        if not get_watched_artist(artist_spotify_id):
            return jsonify(
                {"error": f"Artist {artist_spotify_id} is not being watched."}
            ), 404

        deleted_count = remove_specific_albums_from_artist_table(
            artist_spotify_id, album_ids
        )
        logger.info(
            f"Successfully removed {deleted_count} albums locally for artist {artist_spotify_id}."
        )
        return jsonify(
            {
                "message": f"Successfully removed {deleted_count} albums locally for artist {artist_spotify_id}."
            }
        ), 200
    except Exception as e:
        logger.error(
            f"Error marking albums as missing (deleting locally) for artist {artist_spotify_id}: {e}",
            exc_info=True,
        )
        return jsonify({"error": f"Could not mark albums as missing: {str(e)}"}), 500

View File

@@ -1,65 +1,55 @@
from flask import Blueprint, jsonify, request
import json
import logging
import os
from typing import Any

# Import the centralized config getters that handle file creation and defaults
from routes.utils.celery_config import (
    get_config_params as get_main_config_params,
    DEFAULT_MAIN_CONFIG,
    CONFIG_FILE_PATH as MAIN_CONFIG_FILE_PATH,
)
from routes.utils.watch.manager import (
    get_watch_config as get_watch_manager_config,
    DEFAULT_WATCH_CONFIG,
    CONFIG_FILE_PATH as WATCH_CONFIG_FILE_PATH,
)

logger = logging.getLogger(__name__)

config_bp = Blueprint("config", __name__)

# Flag for config change notifications
config_changed = False
last_config: dict[str, Any] = {}

# Define parameters that should trigger notification when changed
NOTIFY_PARAMETERS = [
    "maxConcurrentDownloads",
    "service",
    "fallback",
    "spotifyQuality",
    "deezerQuality",
]


# Helper to get main config (uses the one from celery_config)
def get_config():
    """Retrieves the main configuration, creating it with defaults if necessary."""
    return get_main_config_params()


# Helper to save main config
def save_config(config_data):
    """Saves the main configuration data to main.json."""
    try:
        MAIN_CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True)

        # Load current or default config
        existing_config = {}
        if MAIN_CONFIG_FILE_PATH.exists():
            with open(MAIN_CONFIG_FILE_PATH, "r") as f_read:
                existing_config = json.load(f_read)
        else:  # Should be rare if get_config_params was called
            existing_config = DEFAULT_MAIN_CONFIG.copy()
@@ -73,7 +63,7 @@ def save_config(config_data):
            if default_key not in existing_config:
                existing_config[default_key] = default_value

        with open(MAIN_CONFIG_FILE_PATH, "w") as f:
            json.dump(existing_config, f, indent=4)
        logger.info(f"Main configuration saved to {MAIN_CONFIG_FILE_PATH}")
        return True, None
@@ -81,11 +71,13 @@ def save_config(config_data):
        logger.error(f"Error saving main configuration: {e}", exc_info=True)
        return False, str(e)


# Helper to get watch config (uses the one from watch/manager.py)
def get_watch_config_http():  # Renamed to avoid conflict with the imported get_watch_config
    """Retrieves the watch configuration, creating it with defaults if necessary."""
    return get_watch_manager_config()


# Helper to save watch config
def save_watch_config_http(watch_config_data):  # Renamed
    """Saves the watch configuration data to watch.json."""
@@ -95,7 +87,7 @@ def save_watch_config_http(watch_config_data):  # Renamed
        # Similar logic to save_config: merge with defaults/existing
        existing_config = {}
        if WATCH_CONFIG_FILE_PATH.exists():
            with open(WATCH_CONFIG_FILE_PATH, "r") as f_read:
                existing_config = json.load(f_read)
        else:  # Should be rare if get_watch_manager_config was called
            existing_config = DEFAULT_WATCH_CONFIG.copy()
@@ -107,7 +99,7 @@ def save_watch_config_http(watch_config_data):  # Renamed
            if default_key not in existing_config:
                existing_config[default_key] = default_value

        with open(WATCH_CONFIG_FILE_PATH, "w") as f:
            json.dump(existing_config, f, indent=4)
        logger.info(f"Watch configuration saved to {WATCH_CONFIG_FILE_PATH}")
        return True, None
@@ -115,7 +107,8 @@ def save_watch_config_http(watch_config_data):  # Renamed
        logger.error(f"Error saving watch configuration: {e}", exc_info=True)
        return False, str(e)


@config_bp.route("/config", methods=["GET"])
def handle_config():
    """Handles GET requests for the main configuration."""
    try:
@@ -123,9 +116,12 @@ def handle_config():
        return jsonify(config)
    except Exception as e:
        logger.error(f"Error in GET /config: {e}", exc_info=True)
        return jsonify(
            {"error": "Failed to retrieve configuration", "details": str(e)}
        ), 500


@config_bp.route("/config", methods=["POST", "PUT"])
def update_config():
    """Handles POST/PUT requests to update the main configuration."""
    try:
@@ -133,12 +129,9 @@ def update_config():
        if not isinstance(new_config, dict):
            return jsonify({"error": "Invalid config format"}), 400

        # Preserve the explicitFilter setting from environment
        explicit_filter_env = os.environ.get("EXPLICIT_FILTER", "false").lower()
        new_config["explicitFilter"] = explicit_filter_env in ("true", "1", "yes", "on")

        success, error_msg = save_config(new_config)
        if success:
@@ -147,33 +140,42 @@ def update_config():
            if updated_config_values is None:
                # This case should ideally not be reached if save_config succeeded
                # and get_config handles errors by returning a default or None.
                return jsonify(
                    {"error": "Failed to retrieve configuration after saving"}
                ), 500
            return jsonify(updated_config_values)
        else:
            return jsonify(
                {"error": "Failed to update configuration", "details": error_msg}
            ), 500
    except json.JSONDecodeError:
        return jsonify({"error": "Invalid JSON data"}), 400
    except Exception as e:
        logger.error(f"Error in POST/PUT /config: {e}", exc_info=True)
        return jsonify(
            {"error": "Failed to update configuration", "details": str(e)}
        ), 500


@config_bp.route("/config/check", methods=["GET"])
def check_config_changes():
    # This endpoint seems more related to dynamically checking if config changed
    # on disk, which might not be necessary if settings are applied on restart
    # or by a dedicated manager. For now, just return current config.
    try:
        config = get_config()
        return jsonify(
            {"message": "Current configuration retrieved.", "config": config}
        )
    except Exception as e:
        logger.error(f"Error in GET /config/check: {e}", exc_info=True)
        return jsonify(
            {"error": "Failed to check configuration", "details": str(e)}
        ), 500


@config_bp.route("/config/watch", methods=["GET"])
def handle_watch_config():
    """Handles GET requests for the watch configuration."""
    try:
@@ -181,9 +183,12 @@ def handle_watch_config():
        return jsonify(watch_config)
    except Exception as e:
        logger.error(f"Error in GET /config/watch: {e}", exc_info=True)
        return jsonify(
            {"error": "Failed to retrieve watch configuration", "details": str(e)}
        ), 500


@config_bp.route("/config/watch", methods=["POST", "PUT"])
def update_watch_config():
    """Handles POST/PUT requests to update the watch configuration."""
    try:
@@ -195,9 +200,13 @@ def update_watch_config():
        if success:
            return jsonify({"message": "Watch configuration updated successfully"}), 200
        else:
            return jsonify(
                {"error": "Failed to update watch configuration", "details": error_msg}
            ), 500
    except json.JSONDecodeError:
        return jsonify({"error": "Invalid JSON data for watch config"}), 400
    except Exception as e:
        logger.error(f"Error in POST/PUT /config/watch: {e}", exc_info=True)
        return jsonify(
            {"error": "Failed to update watch configuration", "details": str(e)}
        ), 500

View File

@@ -8,59 +8,75 @@ from routes.utils.credentials import (
    init_credentials_db,
    # Import new utility functions for global Spotify API creds
    _get_global_spotify_api_creds,
    save_global_spotify_api_creds,
)
import logging

logger = logging.getLogger(__name__)

credentials_bp = Blueprint("credentials", __name__)

# Initialize the database and tables when the blueprint is loaded
init_credentials_db()


@credentials_bp.route("/spotify_api_config", methods=["GET", "PUT"])
def handle_spotify_api_config():
    """Handles GET and PUT requests for the global Spotify API client_id and client_secret."""
    try:
        if request.method == "GET":
            client_id, client_secret = _get_global_spotify_api_creds()
            if client_id is not None and client_secret is not None:
                return jsonify(
                    {"client_id": client_id, "client_secret": client_secret}
                ), 200
            else:
                # If search.json exists but is empty/incomplete, or doesn't exist
                return jsonify(
                    {
                        "warning": "Global Spotify API credentials are not fully configured or file is missing.",
                        "client_id": client_id or "",
                        "client_secret": client_secret or "",
                    }
                ), 200

        elif request.method == "PUT":
            data = request.get_json()
            if not data or "client_id" not in data or "client_secret" not in data:
                return jsonify(
                    {
                        "error": "Request body must contain 'client_id' and 'client_secret'"
                    }
                ), 400

            client_id = data["client_id"]
            client_secret = data["client_secret"]

            if not isinstance(client_id, str) or not isinstance(client_secret, str):
                return jsonify(
                    {"error": "'client_id' and 'client_secret' must be strings"}
                ), 400

            if save_global_spotify_api_creds(client_id, client_secret):
                return jsonify(
                    {"message": "Global Spotify API credentials updated successfully."}
                ), 200
            else:
                return jsonify(
                    {"error": "Failed to save global Spotify API credentials."}
                ), 500

    except Exception as e:
        logger.error(f"Error in /spotify_api_config: {e}", exc_info=True)
        return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500


@credentials_bp.route("/<service>", methods=["GET"])
def handle_list_credentials(service):
    try:
        if service not in ["spotify", "deezer"]:
            return jsonify(
                {"error": "Invalid service. Must be 'spotify' or 'deezer'"}
            ), 400
        return jsonify(list_credentials(service))
    except ValueError as e:  # Should not happen with service check above
        return jsonify({"error": str(e)}), 400
@@ -68,20 +84,23 @@ def handle_list_credentials(service):
logger.error(f"Error listing credentials for {service}: {e}", exc_info=True) logger.error(f"Error listing credentials for {service}: {e}", exc_info=True)
return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500 return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500
@credentials_bp.route('/<service>/<name>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@credentials_bp.route("/<service>/<name>", methods=["GET", "POST", "PUT", "DELETE"])
def handle_single_credential(service, name): def handle_single_credential(service, name):
try: try:
if service not in ['spotify', 'deezer']: if service not in ["spotify", "deezer"]:
return jsonify({"error": "Invalid service. Must be 'spotify' or 'deezer'"}), 400 return jsonify(
{"error": "Invalid service. Must be 'spotify' or 'deezer'"}
), 400
# cred_type logic is removed for Spotify as API keys are global. # cred_type logic is removed for Spotify as API keys are global.
# For Deezer, it's always 'credentials' type implicitly. # For Deezer, it's always 'credentials' type implicitly.
if request.method == 'GET': if request.method == "GET":
# get_credential for Spotify now only returns region and blob_file_path # get_credential for Spotify now only returns region and blob_file_path
return jsonify(get_credential(service, name)) return jsonify(get_credential(service, name))
elif request.method == 'POST': elif request.method == "POST":
data = request.get_json() data = request.get_json()
if not data: if not data:
return jsonify({"error": "Request body cannot be empty."}), 400 return jsonify({"error": "Request body cannot be empty."}), 400
@@ -89,21 +108,36 @@ def handle_single_credential(service, name):
# For Deezer, it expects 'arl' and 'region' # For Deezer, it expects 'arl' and 'region'
# Validation is handled within create_credential utility function # Validation is handled within create_credential utility function
result = create_credential(service, name, data) result = create_credential(service, name, data)
return jsonify({"message": f"Credential for '{name}' ({service}) created successfully.", "details": result}), 201 return jsonify(
{
"message": f"Credential for '{name}' ({service}) created successfully.",
"details": result,
}
), 201
elif request.method == 'PUT': elif request.method == "PUT":
data = request.get_json() data = request.get_json()
if not data: if not data:
return jsonify({"error": "Request body cannot be empty."}), 400 return jsonify({"error": "Request body cannot be empty."}), 400
# edit_credential for Spotify now handles updates to 'region', 'blob_content' # edit_credential for Spotify now handles updates to 'region', 'blob_content'
# For Deezer, 'arl', 'region' # For Deezer, 'arl', 'region'
result = edit_credential(service, name, data) result = edit_credential(service, name, data)
return jsonify({"message": f"Credential for '{name}' ({service}) updated successfully.", "details": result}) return jsonify(
{
"message": f"Credential for '{name}' ({service}) updated successfully.",
"details": result,
}
)
elif request.method == 'DELETE': elif request.method == "DELETE":
# delete_credential for Spotify also handles deleting the blob directory # delete_credential for Spotify also handles deleting the blob directory
result = delete_credential(service, name) result = delete_credential(service, name)
return jsonify({"message": f"Credential for '{name}' ({service}) deleted successfully.", "details": result}) return jsonify(
{
"message": f"Credential for '{name}' ({service}) deleted successfully.",
"details": result,
}
)
except (ValueError, FileNotFoundError, FileExistsError) as e: except (ValueError, FileNotFoundError, FileExistsError) as e:
status_code = 400 status_code = 400
@@ -117,14 +151,18 @@ def handle_single_credential(service, name):
logger.error(f"Server error in /<{service}>/<{name}>: {e}", exc_info=True) logger.error(f"Server error in /<{service}>/<{name}>: {e}", exc_info=True)
return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500 return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500
# The '/search/<service>/<name>' route is now obsolete for Spotify and has been removed. # The '/search/<service>/<name>' route is now obsolete for Spotify and has been removed.
@credentials_bp.route('/all/<service>', methods=['GET'])
@credentials_bp.route("/all/<service>", methods=["GET"])
def handle_all_credentials(service): def handle_all_credentials(service):
"""Lists all credentials for a given service. For Spotify, API keys are global and not listed per account.""" """Lists all credentials for a given service. For Spotify, API keys are global and not listed per account."""
try: try:
if service not in ['spotify', 'deezer']: if service not in ["spotify", "deezer"]:
return jsonify({"error": "Invalid service. Must be 'spotify' or 'deezer'"}), 400 return jsonify(
{"error": "Invalid service. Must be 'spotify' or 'deezer'"}
), 400
credentials_list = [] credentials_list = []
account_names = list_credentials(service) # This lists names from DB account_names = list_credentials(service) # This lists names from DB
@@ -137,17 +175,28 @@ def handle_all_credentials(service):
# We don't add global Spotify API keys here as they are separate # We don't add global Spotify API keys here as they are separate
credentials_list.append({"name": name, "details": account_data}) credentials_list.append({"name": name, "details": account_data})
except FileNotFoundError: except FileNotFoundError:
logger.warning(f"Credential name '{name}' listed for service '{service}' but not found by get_credential. Skipping.") logger.warning(
f"Credential name '{name}' listed for service '{service}' but not found by get_credential. Skipping."
)
except Exception as e_inner: except Exception as e_inner:
logger.error(f"Error fetching details for credential '{name}' ({service}): {e_inner}", exc_info=True) logger.error(
credentials_list.append({"name": name, "error": f"Could not retrieve details: {str(e_inner)}"}) f"Error fetching details for credential '{name}' ({service}): {e_inner}",
exc_info=True,
)
credentials_list.append(
{
"name": name,
"error": f"Could not retrieve details: {str(e_inner)}",
}
)
return jsonify(credentials_list) return jsonify(credentials_list)
except Exception as e: except Exception as e:
logger.error(f"Error in /all/{service}: {e}", exc_info=True) logger.error(f"Error in /all/{service}: {e}", exc_info=True)
return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500 return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500
@credentials_bp.route('/markets', methods=['GET'])
@credentials_bp.route("/markets", methods=["GET"])
def handle_markets(): def handle_markets():
""" """
Returns a list of unique market regions for Deezer and Spotify accounts. Returns a list of unique market regions for Deezer and Spotify accounts.
@@ -157,29 +206,35 @@ def handle_markets():
spotify_regions = set() spotify_regions = set()
# Process Deezer accounts # Process Deezer accounts
deezer_account_names = list_credentials('deezer') deezer_account_names = list_credentials("deezer")
for name in deezer_account_names: for name in deezer_account_names:
try: try:
account_data = get_credential('deezer', name) account_data = get_credential("deezer", name)
if account_data and 'region' in account_data and account_data['region']: if account_data and "region" in account_data and account_data["region"]:
deezer_regions.add(account_data['region']) deezer_regions.add(account_data["region"])
except Exception as e: except Exception as e:
logger.warning(f"Could not retrieve region for deezer account {name}: {e}") logger.warning(
f"Could not retrieve region for deezer account {name}: {e}"
)
# Process Spotify accounts # Process Spotify accounts
spotify_account_names = list_credentials('spotify') spotify_account_names = list_credentials("spotify")
for name in spotify_account_names: for name in spotify_account_names:
try: try:
account_data = get_credential('spotify', name) account_data = get_credential("spotify", name)
if account_data and 'region' in account_data and account_data['region']: if account_data and "region" in account_data and account_data["region"]:
spotify_regions.add(account_data['region']) spotify_regions.add(account_data["region"])
except Exception as e: except Exception as e:
logger.warning(f"Could not retrieve region for spotify account {name}: {e}") logger.warning(
f"Could not retrieve region for spotify account {name}: {e}"
)
return jsonify({ return jsonify(
{
"deezer": sorted(list(deezer_regions)), "deezer": sorted(list(deezer_regions)),
"spotify": sorted(list(spotify_regions)) "spotify": sorted(list(spotify_regions)),
}), 200 }
), 200
except Exception as e: except Exception as e:
logger.error(f"Error in /markets: {e}", exc_info=True) logger.error(f"Error in /markets: {e}", exc_info=True)

View File

@@ -3,40 +3,45 @@ from routes.utils.history_manager import get_history_entries
import logging

logger = logging.getLogger(__name__)

history_bp = Blueprint("history", __name__, url_prefix="/api/history")


@history_bp.route("", methods=["GET"])
def get_download_history():
    """API endpoint to retrieve download history with pagination, sorting, and filtering."""
    try:
        limit = request.args.get("limit", 25, type=int)
        offset = request.args.get("offset", 0, type=int)
        sort_by = request.args.get("sort_by", "timestamp_completed")
        sort_order = request.args.get("sort_order", "DESC")

        # Basic filtering example: filter by status_final or download_type
        filters = {}
        status_filter = request.args.get("status_final")
        if status_filter:
            filters["status_final"] = status_filter
        type_filter = request.args.get("download_type")
        if type_filter:
            filters["download_type"] = type_filter
        # Add more filters as needed, e.g., by item_name (would need LIKE for partial match)
        # search_term = request.args.get('search')
        # if search_term:
        #     filters['item_name'] = f'%{search_term}%' # This would require LIKE in get_history_entries

        entries, total_count = get_history_entries(
            limit, offset, sort_by, sort_order, filters
        )

        return jsonify(
            {
                "entries": entries,
                "total_count": total_count,
                "limit": limit,
                "offset": offset,
            }
        )
    except Exception as e:
        logger.error(f"Error in /api/history endpoint: {e}", exc_info=True)
        return jsonify({"error": "Failed to retrieve download history"}), 500

View File

@@ -1,12 +1,15 @@
from flask import Blueprint, Response, request, jsonify
import json
import traceback
import logging  # Added logging import
import uuid  # For generating error task IDs
import time  # For timestamps
from routes.utils.celery_queue_manager import download_queue_manager
from routes.utils.celery_tasks import (
    store_task_info,
    store_task_status,
    ProgressState,
)  # For error task creation
import threading  # For playlist watch trigger

# Imports from playlist_watch.py
@@ -17,15 +20,19 @@ from routes.utils.watch.db import (
    get_watched_playlists,
    add_specific_tracks_to_playlist_table,
    remove_specific_tracks_from_playlist_table,
    is_track_in_playlist_db,  # Added import
)
from routes.utils.get_info import get_spotify_info  # Already used, but ensure it's here
from routes.utils.watch.manager import (
    check_watched_playlists,
    get_watch_config,
)  # For manual trigger & config

logger = logging.getLogger(__name__)  # Added logger initialization

playlist_bp = Blueprint("playlist", __name__, url_prefix="/api/playlist")


@playlist_bp.route("/download/<playlist_id>", methods=["GET"])
def handle_download(playlist_id):
    # Retrieve essential parameters from the request.
    # name = request.args.get('name') # Removed
@@ -34,28 +41,42 @@ def handle_download(playlist_id):
    # Construct the URL from playlist_id
    url = f"https://open.spotify.com/playlist/{playlist_id}"
    orig_params["original_url"] = (
        request.url
    )  # Update original_url to the constructed one

    # Fetch metadata from Spotify
    try:
        playlist_info = get_spotify_info(playlist_id, "playlist")
        if (
            not playlist_info
            or not playlist_info.get("name")
            or not playlist_info.get("owner")
        ):
            return Response(
                json.dumps(
                    {
                        "error": f"Could not retrieve metadata for playlist ID: {playlist_id}"
                    }
                ),
                status=404,
                mimetype="application/json",
            )
        name_from_spotify = playlist_info.get("name")
        # Use owner's display_name as the 'artist' for playlists
        owner_info = playlist_info.get("owner", {})
        artist_from_spotify = owner_info.get("display_name", "Unknown Owner")
    except Exception as e:
        return Response(
            json.dumps(
                {
                    "error": f"Failed to fetch metadata for playlist {playlist_id}: {str(e)}"
                }
            ),
            status=500,
            mimetype="application/json",
        )

    # Validate required parameters
@@ -63,83 +84,94 @@ def handle_download(playlist_id):
        return Response(
            json.dumps({"error": "Missing required parameter: url"}),
            status=400,
            mimetype="application/json",
        )

    try:
        task_id = download_queue_manager.add_task(
            {
                "download_type": "playlist",
                "url": url,
                "name": name_from_spotify,  # Use fetched name
                "artist": artist_from_spotify,  # Use fetched owner name as artist
                "orig_request": orig_params,
            }
        )
        # Removed DuplicateDownloadError handling, add_task now manages this by creating an error task.
    except Exception as e:
        # Generic error handling for other issues during task submission
        error_task_id = str(uuid.uuid4())
        store_task_info(
            error_task_id,
            {
                "download_type": "playlist",
                "url": url,
                "name": name_from_spotify,  # Use fetched name
                "artist": artist_from_spotify,  # Use fetched owner name as artist
                "original_request": orig_params,
                "created_at": time.time(),
                "is_submission_error_task": True,
            },
        )
        store_task_status(
            error_task_id,
            {
                "status": ProgressState.ERROR,
                "error": f"Failed to queue playlist download: {str(e)}",
                "timestamp": time.time(),
            },
        )
        return Response(
            json.dumps(
                {
                    "error": f"Failed to queue playlist download: {str(e)}",
                    "task_id": error_task_id,
                }
            ),
            status=500,
            mimetype="application/json",
        )

    return Response(
        json.dumps({"prg_file": task_id}),  # prg_file is the old name for task_id
        status=202,
        mimetype="application/json",
    )


@playlist_bp.route("/download/cancel", methods=["GET"])
def cancel_download():
    """
    Cancel a running playlist download process by its prg file name.
    """
    prg_file = request.args.get("prg_file")
    if not prg_file:
        return Response(
            json.dumps({"error": "Missing process id (prg_file) parameter"}),
            status=400,
            mimetype="application/json",
        )

    # Use the queue manager's cancellation method.
    result = download_queue_manager.cancel_task(prg_file)
    status_code = 200 if result.get("status") == "cancelled" else 404
    return Response(json.dumps(result), status=status_code, mimetype="application/json")


@playlist_bp.route("/info", methods=["GET"])
def get_playlist_info():
    """
    Retrieve Spotify playlist metadata given a Spotify playlist ID.
    Expects a query parameter 'id' that contains the Spotify playlist ID.
    """
    spotify_id = request.args.get("id")
    if not spotify_id:
        return Response(
            json.dumps({"error": "Missing parameter: id"}),
            status=400,
            mimetype="application/json",
        )

    try:
@@ -148,36 +180,32 @@ def get_playlist_info():
        # If playlist_info is successfully fetched, check if it's watched
        # and augment track items with is_locally_known status
        if playlist_info and playlist_info.get("id"):
            watched_playlist_details = get_watched_playlist(playlist_info["id"])
            if watched_playlist_details:  # Playlist is being watched
                if playlist_info.get("tracks") and playlist_info["tracks"].get("items"):
                    for item in playlist_info["tracks"]["items"]:
                        if item and item.get("track") and item["track"].get("id"):
                            track_id = item["track"]["id"]
                            item["track"]["is_locally_known"] = is_track_in_playlist_db(
                                playlist_info["id"], track_id
                            )
                        elif item and item.get(
                            "track"
                        ):  # Track object exists but no ID
                            item["track"]["is_locally_known"] = False
            # If not watched, or no tracks, is_locally_known will not be added, or tracks won't exist to add it to.
            # Frontend should handle absence of this key as false.

        return Response(
            json.dumps(playlist_info), status=200, mimetype="application/json"
        )
    except Exception as e:
        error_data = {"error": str(e), "traceback": traceback.format_exc()}
        return Response(json.dumps(error_data), status=500, mimetype="application/json")


@playlist_bp.route("/watch/<string:playlist_spotify_id>", methods=["PUT"])
def add_to_watchlist(playlist_spotify_id):
    """Adds a playlist to the watchlist."""
    watch_config = get_watch_config()
@@ -188,13 +216,21 @@ def add_to_watchlist(playlist_spotify_id):
    try:
        # Check if already watched
        if get_watched_playlist(playlist_spotify_id):
            return jsonify(
                {"message": f"Playlist {playlist_spotify_id} is already being watched."}
            ), 200

        # Fetch playlist details from Spotify to populate our DB
        playlist_data = get_spotify_info(playlist_spotify_id, "playlist")
        if not playlist_data or "id" not in playlist_data:
            logger.error(
                f"Could not fetch details for playlist {playlist_spotify_id} from Spotify."
            )
            return jsonify(
                {
                    "error": f"Could not fetch details for playlist {playlist_spotify_id} from Spotify."
                }
            ), 404

        add_playlist_db(playlist_data)  # This also creates the tracks table
@@ -206,13 +242,23 @@ def add_to_watchlist(playlist_spotify_id):
        # from routes.utils.watch.db import add_tracks_to_playlist_db # Keep local import for clarity
        # add_tracks_to_playlist_db(playlist_spotify_id, initial_track_items)

        logger.info(
            f"Playlist {playlist_spotify_id} added to watchlist. Its tracks will be processed by the watch manager."
        )
        return jsonify(
            {
                "message": f"Playlist {playlist_spotify_id} added to watchlist. Tracks will be processed shortly."
            }
        ), 201
    except Exception as e:
        logger.error(
            f"Error adding playlist {playlist_spotify_id} to watchlist: {e}",
            exc_info=True,
        )
        return jsonify({"error": f"Could not add playlist to watchlist: {str(e)}"}), 500


@playlist_bp.route("/watch/<string:playlist_spotify_id>/status", methods=["GET"])
def get_playlist_watch_status(playlist_spotify_id):
    """Checks if a specific playlist is being watched."""
    logger.info(f"Checking watch status for playlist {playlist_spotify_id}.")
@@ -225,10 +271,14 @@ def get_playlist_watch_status(playlist_spotify_id):
        # between "not watched" and an actual error fetching status.
        return jsonify({"is_watched": False}), 200
    except Exception as e:
        logger.error(
            f"Error checking watch status for playlist {playlist_spotify_id}: {e}",
            exc_info=True,
        )
        return jsonify({"error": f"Could not check watch status: {str(e)}"}), 500


@playlist_bp.route("/watch/<string:playlist_spotify_id>", methods=["DELETE"])
def remove_from_watchlist(playlist_spotify_id):
    """Removes a playlist from the watchlist."""
    watch_config = get_watch_config()
@@ -238,76 +288,149 @@ def remove_from_watchlist(playlist_spotify_id):
    logger.info(f"Attempting to remove playlist {playlist_spotify_id} from watchlist.")
    try:
        if not get_watched_playlist(playlist_spotify_id):
            return jsonify(
                {"error": f"Playlist {playlist_spotify_id} not found in watchlist."}
            ), 404

        remove_playlist_db(playlist_spotify_id)
        logger.info(
            f"Playlist {playlist_spotify_id} removed from watchlist successfully."
        )
        return jsonify(
            {"message": f"Playlist {playlist_spotify_id} removed from watchlist."}
        ), 200
    except Exception as e:
        logger.error(
            f"Error removing playlist {playlist_spotify_id} from watchlist: {e}",
            exc_info=True,
        )
        return jsonify(
            {"error": f"Could not remove playlist from watchlist: {str(e)}"}
        ), 500


@playlist_bp.route("/watch/<string:playlist_spotify_id>/tracks", methods=["POST"])
def mark_tracks_as_known(playlist_spotify_id):
    """Fetches details for given track IDs and adds/updates them in the playlist's local DB table."""
    watch_config = get_watch_config()
    if not watch_config.get("enabled", False):
        return jsonify(
            {
                "error": "Watch feature is currently disabled globally. Cannot mark tracks."
            }
        ), 403

    logger.info(
        f"Attempting to mark tracks as known for playlist {playlist_spotify_id}."
    )
    try:
        track_ids = request.json
        if not isinstance(track_ids, list) or not all(
            isinstance(tid, str) for tid in track_ids
        ):
            return jsonify(
                {
                    "error": "Invalid request body. Expecting a JSON array of track Spotify IDs."
                }
            ), 400

        if not get_watched_playlist(playlist_spotify_id):
            return jsonify(
                {"error": f"Playlist {playlist_spotify_id} is not being watched."}
            ), 404

        fetched_tracks_details = []
        for track_id in track_ids:
            try:
                track_detail = get_spotify_info(track_id, "track")
                if track_detail and track_detail.get("id"):
                    fetched_tracks_details.append(track_detail)
                else:
                    logger.warning(
                        f"Could not fetch details for track {track_id} when marking as known for playlist {playlist_spotify_id}."
                    )
            except Exception as e:
                logger.error(
                    f"Failed to fetch Spotify details for track {track_id}: {e}"
                )

        if not fetched_tracks_details:
            return jsonify(
                {
                    "message": "No valid track details could be fetched to mark as known.",
                    "processed_count": 0,
                }
            ), 200

        add_specific_tracks_to_playlist_table(
            playlist_spotify_id, fetched_tracks_details
        )
        logger.info(
            f"Successfully marked/updated {len(fetched_tracks_details)} tracks as known for playlist {playlist_spotify_id}."
        )
        return jsonify(
            {
                "message": f"Successfully processed {len(fetched_tracks_details)} tracks for playlist {playlist_spotify_id}."
            }
        ), 200
    except Exception as e:
        logger.error(
            f"Error marking tracks as known for playlist {playlist_spotify_id}: {e}",
            exc_info=True,
        )
        return jsonify({"error": f"Could not mark tracks as known: {str(e)}"}), 500


@playlist_bp.route("/watch/<string:playlist_spotify_id>/tracks", methods=["DELETE"])
def mark_tracks_as_missing_locally(playlist_spotify_id):
    """Removes specified tracks from the playlist's local DB table."""
    watch_config = get_watch_config()
    if not watch_config.get("enabled", False):
        return jsonify(
            {
                "error": "Watch feature is currently disabled globally. Cannot mark tracks."
            }
        ), 403

    logger.info(
        f"Attempting to mark tracks as missing (remove locally) for playlist {playlist_spotify_id}."
    )
    try:
        track_ids = request.json
        if not isinstance(track_ids, list) or not all(
            isinstance(tid, str) for tid in track_ids
        ):
            return jsonify(
                {
                    "error": "Invalid request body. Expecting a JSON array of track Spotify IDs."
                }
            ), 400

        if not get_watched_playlist(playlist_spotify_id):
            return jsonify(
                {"error": f"Playlist {playlist_spotify_id} is not being watched."}
            ), 404

        deleted_count = remove_specific_tracks_from_playlist_table(
            playlist_spotify_id, track_ids
        )
        logger.info(
            f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}."
        )
        return jsonify(
            {
                "message": f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}."
            }
        ), 200
    except Exception as e:
        logger.error(
            f"Error marking tracks as missing (deleting locally) for playlist {playlist_spotify_id}: {e}",
            exc_info=True,
        )
        return jsonify({"error": f"Could not mark tracks as missing: {str(e)}"}), 500


@playlist_bp.route("/watch/list", methods=["GET"])
def list_watched_playlists_endpoint():
    """Lists all playlists currently in the watchlist."""
    try:
@@ -317,43 +440,86 @@ def list_watched_playlists_endpoint():
        logger.error(f"Error listing watched playlists: {e}", exc_info=True)
        return jsonify({"error": f"Could not list watched playlists: {str(e)}"}), 500


@playlist_bp.route("/watch/trigger_check", methods=["POST"])
def trigger_playlist_check_endpoint():
    """Manually triggers the playlist checking mechanism for all watched playlists."""
    watch_config = get_watch_config()
    if not watch_config.get("enabled", False):
        return jsonify(
            {
                "error": "Watch feature is currently disabled globally. Cannot trigger check."
            }
        ), 403

    logger.info("Manual trigger for playlist check received for all playlists.")
    try:
        # Run check_watched_playlists without an ID to check all
        thread = threading.Thread(target=check_watched_playlists, args=(None,))
        thread.start()
        return jsonify(
            {
                "message": "Playlist check triggered successfully in the background for all playlists."
            }
        ), 202
    except Exception as e:
        logger.error(
            f"Error manually triggering playlist check for all: {e}", exc_info=True
        )
        return jsonify(
            {"error": f"Could not trigger playlist check for all: {str(e)}"}
        ), 500


@playlist_bp.route(
    "/watch/trigger_check/<string:playlist_spotify_id>", methods=["POST"]
)
def trigger_specific_playlist_check_endpoint(playlist_spotify_id: str):
    """Manually triggers the playlist checking mechanism for a specific playlist."""
    watch_config = get_watch_config()
    if not watch_config.get("enabled", False):
        return jsonify(
            {
                "error": "Watch feature is currently disabled globally. Cannot trigger check."
            }
        ), 403

    logger.info(
        f"Manual trigger for specific playlist check received for ID: {playlist_spotify_id}"
    )
    try:
        # Check if the playlist is actually in the watchlist first
        watched_playlist = get_watched_playlist(playlist_spotify_id)
        if not watched_playlist:
            logger.warning(
                f"Trigger specific check: Playlist ID {playlist_spotify_id} not found in watchlist."
            )
            return jsonify(
                {
                    "error": f"Playlist {playlist_spotify_id} is not in the watchlist. Add it first."
                }
            ), 404

        # Run check_watched_playlists with the specific ID
        thread = threading.Thread(
            target=check_watched_playlists, args=(playlist_spotify_id,)
        )
        thread.start()
        logger.info(
            f"Playlist check triggered in background for specific playlist ID: {playlist_spotify_id}"
        )
        return jsonify(
            {
                "message": f"Playlist check triggered successfully in the background for {playlist_spotify_id}."
            }
        ), 202
    except Exception as e:
        logger.error(
            f"Error manually triggering specific playlist check for {playlist_spotify_id}: {e}",
            exc_info=True,
        )
        return jsonify(
            {
                "error": f"Could not trigger playlist check for {playlist_spotify_id}: {str(e)}"
            }
        ), 500
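Note: the watch endpoints above compose into a simple workflow. A minimal sketch, not part of the commit, assuming the app runs on http://localhost:7171; the playlist and track IDs are placeholders.

import requests

BASE = "http://localhost:7171/api/playlist"  # prefix from the blueprint above; host/port assumed
playlist_id = "YOUR_PLAYLIST_ID"  # placeholder Spotify playlist ID

# Add the playlist to the watchlist, confirm it is watched, then force a check.
print(requests.put(f"{BASE}/watch/{playlist_id}").json())
print(requests.get(f"{BASE}/watch/{playlist_id}/status").json())
print(requests.post(f"{BASE}/watch/trigger_check/{playlist_id}").json())

# Mark tracks as already known locally; the body is a JSON array of track IDs.
track_ids = ["TRACK_ID_1", "TRACK_ID_2"]  # placeholder track IDs
print(requests.post(f"{BASE}/watch/{playlist_id}/tracks", json=track_ids).json())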

View File

@@ -1,6 +1,4 @@
from flask import Blueprint, abort, jsonify, request
import logging
import time
@@ -11,18 +9,18 @@ from routes.utils.celery_tasks import (
    get_all_tasks,
    cancel_task,
    retry_task,
    redis_client,
)

# Configure logging
logger = logging.getLogger(__name__)

prgs_bp = Blueprint("prgs", __name__, url_prefix="/api/prgs")

# (Old .prg file system removed. Using new task system only.)


@prgs_bp.route("/<task_id>", methods=["GET"])
def get_prg_file(task_id):
    """
    Return a JSON object with the resource type, its name (title),
@@ -49,20 +47,31 @@ def get_prg_file(task_id):
        if download_type and item_url:
            try:
                # Extract the ID from the item_url (last part of the path)
                item_id = item_url.split("/")[-1]
                if item_id:  # Ensure item_id is not empty
                    base_url = request.host_url.rstrip("/")
                    dynamic_original_url = (
                        f"{base_url}/api/{download_type}/download/{item_id}"
                    )
                else:
                    logger.warning(
                        f"Could not extract item ID from URL: {item_url} for task {task_id}. Falling back for original_url."
                    )
                    original_request_obj = task_info.get("original_request", {})
                    dynamic_original_url = original_request_obj.get("original_url", "")
            except Exception as e:
                logger.error(
                    f"Error constructing dynamic original_url for task {task_id}: {e}",
                    exc_info=True,
                )
                original_request_obj = task_info.get("original_request", {})
                dynamic_original_url = original_request_obj.get(
                    "original_url", ""
                )  # Fallback on any error
        else:
            logger.warning(
                f"Missing download_type ('{download_type}') or item_url ('{item_url}') in task_info for task {task_id}. Falling back for original_url."
            )
            original_request_obj = task_info.get("original_request", {})
            dynamic_original_url = original_request_obj.get("original_url", "")
@@ -73,12 +82,12 @@ def get_prg_file(task_id):
        "last_line": last_status,
        "timestamp": time.time(),
        "task_id": task_id,
        "status_count": status_count,
    }
    return jsonify(response)


@prgs_bp.route("/delete/<task_id>", methods=["DELETE"])
def delete_prg_file(task_id):
    """
    Delete a task's information and history.
@@ -92,13 +101,12 @@ def delete_prg_file(task_id):
    if not task_info:
        abort(404, "Task not found")

    cancel_task(task_id)

    redis_client.delete(f"task:{task_id}:info")
    redis_client.delete(f"task:{task_id}:status")

    return {"message": f"Task {task_id} deleted successfully"}, 200


@prgs_bp.route("/list", methods=["GET"])
def list_prg_files():
    """
    Retrieve a list of all tasks in the system.
@@ -116,20 +124,38 @@ def list_prg_files():
            last_status = get_last_task_status(task_id)

            if task_info and last_status:
                detailed_tasks.append(
                    {
                        "task_id": task_id,
                        "type": task_info.get(
                            "type", task_summary.get("type", "unknown")
                        ),
                        "name": task_info.get(
                            "name", task_summary.get("name", "Unknown")
                        ),
                        "artist": task_info.get(
                            "artist", task_summary.get("artist", "")
                        ),
                        "download_type": task_info.get(
                            "download_type",
                            task_summary.get("download_type", "unknown"),
                        ),
                        "status": last_status.get(
                            "status", "unknown"
                        ),  # Keep summary status for quick access
                        "last_status_obj": last_status,  # Full last status object
                        "original_request": task_info.get("original_request", {}),
                        "created_at": task_info.get("created_at", 0),
                        "timestamp": last_status.get(
                            "timestamp", task_info.get("created_at", 0)
                        ),
                    }
                )
            elif (
                task_info
            ):  # If last_status is somehow missing, still provide some info
                detailed_tasks.append(
                    {
                        "task_id": task_id,
                        "type": task_info.get("type", "unknown"),
                        "name": task_info.get("name", "Unknown"),
@@ -139,11 +165,14 @@ def list_prg_files():
                        "last_status_obj": None,
                        "original_request": task_info.get("original_request", {}),
                        "created_at": task_info.get("created_at", 0),
                        "timestamp": task_info.get("created_at", 0),
                    }
                )

        # Sort tasks by creation time (newest first, or by timestamp if creation time is missing)
        detailed_tasks.sort(
            key=lambda x: x.get("timestamp", x.get("created_at", 0)), reverse=True
        )

        return jsonify(detailed_tasks)
    except Exception as e:
@@ -151,7 +180,7 @@ def list_prg_files():
        return jsonify({"error": "Failed to retrieve task list"}), 500


@prgs_bp.route("/retry/<task_id>", methods=["POST"])
def retry_task_endpoint(task_id):
    """
    Retry a failed task.
@@ -170,15 +199,17 @@ def retry_task_endpoint(task_id):
        # If not found in new system, we need to handle the old system retry
        # For now, return an error as we're transitioning to the new system
        return jsonify(
            {
                "status": "error",
                "message": "Retry for old system is not supported in the new API. Please use the new task ID format.",
            }
        ), 400
    except Exception as e:
        abort(500, f"An error occurred: {e}")


@prgs_bp.route("/cancel/<task_id>", methods=["POST"])
def cancel_task_endpoint(task_id):
    """
    Cancel a running or queued task.
@@ -197,9 +228,11 @@ def cancel_task_endpoint(task_id):
        # If not found in new system, we need to handle the old system cancellation
        # For now, return an error as we're transitioning to the new system
        return jsonify(
            {
                "status": "error",
                "message": "Cancellation for old system is not supported in the new API. Please use the new task ID format.",
            }
        ), 400
    except Exception as e:
        abort(500, f"An error occurred: {e}")
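Note: a polling sketch against the task endpoints above, not part of the commit. The host/port and the terminal status names are assumptions; the task ID is whatever prg_file value a download endpoint returned.

import time
import requests

BASE = "http://localhost:7171/api/prgs"  # prefix from the blueprint above; host/port assumed
task_id = "PASTE_TASK_ID_HERE"  # the prg_file value returned when the download was queued

# Poll until the last reported status looks terminal.
while True:
    info = requests.get(f"{BASE}/{task_id}").json()
    status = info["last_line"].get("status", "unknown")
    print(status)
    if status in ("complete", "error", "cancelled"):  # assumed terminal status names
        break
    time.sleep(2)

# A failed task can be retried; a finished one can be deleted along with its history.
requests.post(f"{BASE}/retry/{task_id}")
requests.delete(f"{BASE}/delete/{task_id}")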

View File

@@ -1,66 +1,67 @@
from flask import Blueprint, jsonify, request
import logging
from routes.utils.search import search  # Corrected import
from routes.config import get_config  # Import get_config function

search_bp = Blueprint("search", __name__)


@search_bp.route("/search", methods=["GET"])
def handle_search():
    try:
        # Get query parameters
        query = request.args.get("q", "")
        search_type = request.args.get("search_type", "")
        limit = int(request.args.get("limit", 10))
        main = request.args.get(
            "main", ""
        )  # Get the main parameter for account selection

        # If main parameter is not provided in the request, get it from config
        if not main:
            config = get_config()
            if config and "spotify" in config:
                main = config["spotify"]
                print(f"Using main from config: {main}")

        # Validate parameters
        if not query:
            return jsonify({"error": "Missing search query"}), 400

        valid_types = ["track", "album", "artist", "playlist", "episode"]
        if search_type not in valid_types:
            return jsonify({"error": "Invalid search type"}), 400

        # Perform the search with corrected parameter name
        raw_results = search(
            query=query,
            search_type=search_type,  # Fixed parameter name
            limit=limit,
            main=main,  # Pass the main parameter
        )

        # Extract items from the appropriate section of the response based on search_type
        items = []
        if raw_results and search_type + "s" in raw_results:
            type_key = search_type + "s"
            items = raw_results[type_key].get("items", [])
        elif raw_results and search_type in raw_results:
            items = raw_results[search_type].get("items", [])

        # Return both the items array and the full data for debugging
        return jsonify(
            {
                "items": items,
                "data": raw_results,  # Include full data for debugging
                "error": None,
            }
        )
    except ValueError as e:
        print(f"ValueError in search: {str(e)}")
        return jsonify({"error": str(e)}), 400
    except Exception as e:
        import traceback

        print(f"Exception in search: {str(e)}")
        print(traceback.format_exc())
        return jsonify({"error": f"Internal server error: {str(e)}"}), 500
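Note: a usage sketch for the search endpoint, not part of the commit. The search blueprint is created without a url_prefix, so the mount point depends on how the app registers it; "/api" is an assumption, as are the host and port.

import requests

url = "http://localhost:7171/api/search"  # assumed mount point, host, and port

params = {"q": "daft punk", "search_type": "track", "limit": 5}
results = requests.get(url, params=params).json()
for item in results["items"]:
    print(item.get("name"))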

View File

@@ -1,17 +1,21 @@
from flask import Blueprint, Response, request from flask import Blueprint, Response, request
import os
import json import json
import traceback import traceback
import uuid # For generating error task IDs import uuid # For generating error task IDs
import time # For timestamps import time # For timestamps
from routes.utils.celery_queue_manager import download_queue_manager from routes.utils.celery_queue_manager import download_queue_manager
from routes.utils.celery_tasks import store_task_info, store_task_status, ProgressState # For error task creation from routes.utils.celery_tasks import (
store_task_info,
store_task_status,
ProgressState,
) # For error task creation
from urllib.parse import urlparse # for URL validation from urllib.parse import urlparse # for URL validation
from routes.utils.get_info import get_spotify_info # Added import from routes.utils.get_info import get_spotify_info # Added import
track_bp = Blueprint('track', __name__) track_bp = Blueprint("track", __name__)
@track_bp.route('/download/<track_id>', methods=['GET'])
@track_bp.route("/download/<track_id>", methods=["GET"])
def handle_download(track_id): def handle_download(track_id):
# Retrieve essential parameters from the request. # Retrieve essential parameters from the request.
# name = request.args.get('name') # Removed # name = request.args.get('name') # Removed
@@ -25,132 +29,151 @@ def handle_download(track_id):
    # Fetch metadata from Spotify
    try:
        track_info = get_spotify_info(track_id, "track")
        if (
            not track_info
            or not track_info.get("name")
            or not track_info.get("artists")
        ):
            return Response(
                json.dumps(
                    {"error": f"Could not retrieve metadata for track ID: {track_id}"}
                ),
                status=404,
                mimetype="application/json",
            )

        name_from_spotify = track_info.get("name")
        artist_from_spotify = (
            track_info["artists"][0].get("name")
            if track_info["artists"]
            else "Unknown Artist"
        )
    except Exception as e:
        return Response(
            json.dumps(
                {"error": f"Failed to fetch metadata for track {track_id}: {str(e)}"}
            ),
            status=500,
            mimetype="application/json",
        )

    # Validate required parameters
    if not url:
        return Response(
            json.dumps(
                {"error": "Missing required parameter: url", "original_url": url}
            ),
            status=400,
            mimetype="application/json",
        )

    # Validate URL domain
    parsed = urlparse(url)
    host = parsed.netloc.lower()
    if not (
        host.endswith("deezer.com")
        or host.endswith("open.spotify.com")
        or host.endswith("spotify.com")
    ):
        return Response(
            json.dumps({"error": f"Invalid Link {url} :(", "original_url": url}),
            status=400,
            mimetype="application/json",
        )

    try:
        task_id = download_queue_manager.add_task(
            {
                "download_type": "track",
                "url": url,
                "name": name_from_spotify,  # Use fetched name
                "artist": artist_from_spotify,  # Use fetched artist
                "orig_request": orig_params,
            }
        )
        # Removed DuplicateDownloadError handling, add_task now manages this by creating an error task.
    except Exception as e:
        # Generic error handling for other issues during task submission
        error_task_id = str(uuid.uuid4())
        store_task_info(
            error_task_id,
            {
                "download_type": "track",
                "url": url,
                "name": name_from_spotify,  # Use fetched name
                "artist": artist_from_spotify,  # Use fetched artist
                "original_request": orig_params,
                "created_at": time.time(),
                "is_submission_error_task": True,
            },
        )
        store_task_status(
            error_task_id,
            {
                "status": ProgressState.ERROR,
                "error": f"Failed to queue track download: {str(e)}",
                "timestamp": time.time(),
            },
        )
        return Response(
            json.dumps(
                {
                    "error": f"Failed to queue track download: {str(e)}",
                    "task_id": error_task_id,
                }
            ),
            status=500,
            mimetype="application/json",
        )

    return Response(
        json.dumps({"prg_file": task_id}),  # prg_file is the old name for task_id
        status=202,
        mimetype="application/json",
    )


@track_bp.route("/download/cancel", methods=["GET"])
def cancel_download():
    """
    Cancel a running track download process by its process id (prg file name).
    """
    prg_file = request.args.get("prg_file")
    if not prg_file:
        return Response(
            json.dumps({"error": "Missing process id (prg_file) parameter"}),
            status=400,
            mimetype="application/json",
        )

    # Use the queue manager's cancellation method.
    result = download_queue_manager.cancel_task(prg_file)
    status_code = 200 if result.get("status") == "cancelled" else 404

    return Response(json.dumps(result), status=status_code, mimetype="application/json")


@track_bp.route("/info", methods=["GET"])
def get_track_info():
    """
    Retrieve Spotify track metadata given a Spotify track ID.
    Expects a query parameter 'id' that contains the Spotify track ID.
    """
    spotify_id = request.args.get("id")
    if not spotify_id:
        return Response(
            json.dumps({"error": "Missing parameter: id"}),
            status=400,
            mimetype="application/json",
        )

    try:
        # Import and use the get_spotify_info function from the utility module.
        from routes.utils.get_info import get_spotify_info

        track_info = get_spotify_info(spotify_id, "track")
        return Response(json.dumps(track_info), status=200, mimetype="application/json")
    except Exception as e:
        error_data = {"error": str(e), "traceback": traceback.format_exc()}
        return Response(json.dumps(error_data), status=500, mimetype="application/json")
@@ -1,11 +1,12 @@
import traceback
from deezspot.spotloader import SpoLogin
from deezspot.deezloader import DeeLogin
from routes.utils.credentials import (
    get_credential,
    _get_global_spotify_api_creds,
    get_spotify_blob_path,
)


def download_album(
    url,
@@ -23,51 +24,63 @@ def download_album(
    max_retries=3,
    progress_callback=None,
    convert_to=None,
    bitrate=None,
):
    try:
        # Detect URL source (Spotify or Deezer) from URL
        is_spotify_url = "open.spotify.com" in url.lower()
        is_deezer_url = "deezer.com" in url.lower()

        service = ""
        if is_spotify_url:
            service = "spotify"
        elif is_deezer_url:
            service = "deezer"
        else:
            error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com"
            print(f"ERROR: {error_msg}")
            raise ValueError(error_msg)

        print(f"DEBUG: album.py - Service determined from URL: {service}")
        print(
            f"DEBUG: album.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'"
        )

        # Get global Spotify API credentials
        global_spotify_client_id, global_spotify_client_secret = (
            _get_global_spotify_api_creds()
        )
        if not global_spotify_client_id or not global_spotify_client_secret:
            warning_msg = "WARN: album.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail."
            print(warning_msg)

        if service == "spotify":
            if fallback:  # Fallback is a Deezer account name for a Spotify URL
                if quality is None:
                    quality = "FLAC"  # Deezer quality for first attempt
                if fall_quality is None:
                    fall_quality = (
                        "HIGH"  # Spotify quality for fallback (if Deezer fails)
                    )

                deezer_error = None
                try:
                    # Attempt 1: Deezer via download_albumspo (using 'fallback' as Deezer account name)
                    print(
                        f"DEBUG: album.py - Spotify URL. Attempt 1: Deezer (account: {fallback})"
                    )
                    deezer_fallback_creds = get_credential("deezer", fallback)
                    arl = deezer_fallback_creds.get("arl")
                    if not arl:
                        raise ValueError(
                            f"ARL not found for Deezer account '{fallback}'."
                        )
                    dl = DeeLogin(
                        arl=arl,
                        spotify_client_id=global_spotify_client_id,
                        spotify_client_secret=global_spotify_client_secret,
                        progress_callback=progress_callback,
                    )
                    dl.download_albumspo(
                        link_album=url,  # Spotify URL
@@ -85,29 +98,44 @@ def download_album(
                        retry_delay_increase=retry_delay_increase,
                        max_retries=max_retries,
                        convert_to=convert_to,
                        bitrate=bitrate,
                    )
                    print(
                        f"DEBUG: album.py - Album download via Deezer (account: {fallback}) successful for Spotify URL."
                    )
                except Exception as e:
                    deezer_error = e
                    print(
                        f"ERROR: album.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}"
                    )
                    traceback.print_exc()
                    print(
                        f"DEBUG: album.py - Attempting Spotify direct download (account: {main} for blob)..."
                    )

                    # Attempt 2: Spotify direct via download_album (using 'main' as Spotify account for blob)
                    try:
                        if (
                            not global_spotify_client_id
                            or not global_spotify_client_secret
                        ):
                            raise ValueError(
                                "Global Spotify API credentials (client_id/secret) not configured for Spotify download."
                            )
                        blob_file_path = get_spotify_blob_path(main)
                        if not blob_file_path or not blob_file_path.exists():
                            raise FileNotFoundError(
                                f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}"
                            )

                        spo = SpoLogin(
                            credentials_path=str(
                                blob_file_path
                            ),  # Ensure it's a string
                            spotify_client_id=global_spotify_client_id,
                            spotify_client_secret=global_spotify_client_secret,
                            progress_callback=progress_callback,
                        )
                        spo.download_album(
                            link_album=url,  # Spotify URL
@@ -126,31 +154,42 @@ def download_album(
                            retry_delay_increase=retry_delay_increase,
                            max_retries=max_retries,
                            convert_to=convert_to,
                            bitrate=bitrate,
                        )
                        print(
                            f"DEBUG: album.py - Spotify direct download (account: {main} for blob) successful."
                        )
                    except Exception as e2:
                        print(
                            f"ERROR: album.py - Spotify direct download (account: {main} for blob) also failed: {e2}"
                        )
                        raise RuntimeError(
                            f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. "
                            f"Deezer error: {deezer_error}, Spotify error: {e2}"
                        ) from e2
            else:
                # Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob)
                if quality is None:
                    quality = "HIGH"  # Default Spotify quality
                print(
                    f"DEBUG: album.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}"
                )
                if not global_spotify_client_id or not global_spotify_client_secret:
                    raise ValueError(
                        "Global Spotify API credentials (client_id/secret) not configured for Spotify download."
                    )
                blob_file_path = get_spotify_blob_path(main)
                if not blob_file_path or not blob_file_path.exists():
                    raise FileNotFoundError(
                        f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}"
                    )
                spo = SpoLogin(
                    credentials_path=str(blob_file_path),  # Ensure it's a string
                    spotify_client_id=global_spotify_client_id,
                    spotify_client_secret=global_spotify_client_secret,
                    progress_callback=progress_callback,
                )
                spo.download_album(
                    link_album=url,
@@ -169,16 +208,21 @@ def download_album(
                    retry_delay_increase=retry_delay_increase,
                    max_retries=max_retries,
                    convert_to=convert_to,
                    bitrate=bitrate,
                )
                print(
                    f"DEBUG: album.py - Direct Spotify download (account: {main} for blob) successful."
                )

        elif service == "deezer":
            # Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL)
            if quality is None:
                quality = "FLAC"  # Default Deezer quality
            print(
                f"DEBUG: album.py - Deezer URL. Direct download with Deezer account: {main}"
            )
            deezer_main_creds = get_credential("deezer", main)  # For ARL
            arl = deezer_main_creds.get("arl")
            if not arl:
                raise ValueError(f"ARL not found for Deezer account '{main}'.")
@@ -186,7 +230,7 @@ def download_album(
                arl=arl,  # Account specific ARL
                spotify_client_id=global_spotify_client_id,  # Global Spotify keys
                spotify_client_secret=global_spotify_client_secret,  # Global Spotify keys
                progress_callback=progress_callback,
            )
            dl.download_albumdee(  # Deezer URL, download via Deezer
                link_album=url,
@@ -203,9 +247,11 @@ def download_album(
                retry_delay_increase=retry_delay_increase,
                max_retries=max_retries,
                convert_to=convert_to,
                bitrate=bitrate,
            )
            print(
                f"DEBUG: album.py - Direct Deezer download (account: {main}) successful."
            )
        else:
            # Should be caught by initial service check, but as a safeguard
            raise ValueError(f"Unsupported service determined: {service}")
@@ -1,10 +1,7 @@
import json
import logging
from flask import url_for
from routes.utils.celery_queue_manager import download_queue_manager
from routes.utils.get_info import get_spotify_info
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds
from routes.utils.celery_tasks import get_last_task_status, ProgressState
@@ -15,12 +12,18 @@ from deezspot.libutils.utils import get_ids, link_is_valid
# Configure logging
logger = logging.getLogger(__name__)


def log_json(message_dict):
    """Helper function to output a JSON-formatted log message."""
    print(json.dumps(message_dict))


def get_artist_discography(
    url,
    main_spotify_account_name,
    album_type="album,single,compilation,appears_on",
    progress_callback=None,
):
    """
    Validate the URL, extract the artist ID, and retrieve the discography.
    Uses global Spotify API client_id/secret for Spo initialization.
@@ -39,21 +42,34 @@ def get_artist_discography(url, main_spotify_account_name, album_type='album,sin
    client_id, client_secret = _get_global_spotify_api_creds()
    if not client_id or not client_secret:
        log_json(
            {
                "status": "error",
                "message": "Global Spotify API client_id or client_secret not configured.",
            }
        )
        raise ValueError("Global Spotify API credentials are not configured.")

    if not main_spotify_account_name:
        # This is a warning now, as API keys are global.
        logger.warning(
            "main_spotify_account_name not provided for get_artist_discography context. Using global API keys."
        )
    else:
        # Check if account exists for context, good for consistency
        try:
            get_credential("spotify", main_spotify_account_name)
            logger.debug(
                f"Spotify account context '{main_spotify_account_name}' exists for get_artist_discography."
            )
        except FileNotFoundError:
            logger.warning(
                f"Spotify account '{main_spotify_account_name}' provided for discography context not found."
            )
        except Exception as e:
            logger.warning(
                f"Error checking Spotify account '{main_spotify_account_name}' for discography context: {e}"
            )

    Spo.__init__(client_id, client_secret)  # Initialize with global API keys
@@ -78,7 +94,9 @@ def get_artist_discography(url, main_spotify_account_name, album_type='album,sin
        raise


def download_artist_albums(
    url, album_type="album,single,compilation", request_args=None
):
    """
    Download albums by an artist, filtered by album types.
@@ -95,19 +113,20 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
raise ValueError("Missing required parameter: url") raise ValueError("Missing required parameter: url")
# Extract artist ID from URL # Extract artist ID from URL
artist_id = url.split('/')[-1] artist_id = url.split("/")[-1]
if '?' in artist_id: if "?" in artist_id:
artist_id = artist_id.split('?')[0] artist_id = artist_id.split("?")[0]
logger.info(f"Fetching artist info for ID: {artist_id}") logger.info(f"Fetching artist info for ID: {artist_id}")
# Detect URL source (only Spotify is supported for artists) # Detect URL source (only Spotify is supported for artists)
is_spotify_url = 'open.spotify.com' in url.lower() is_spotify_url = "open.spotify.com" in url.lower()
is_deezer_url = 'deezer.com' in url.lower()
# Artist functionality only works with Spotify URLs currently # Artist functionality only works with Spotify URLs currently
if not is_spotify_url: if not is_spotify_url:
error_msg = "Invalid URL: Artist functionality only supports open.spotify.com URLs" error_msg = (
"Invalid URL: Artist functionality only supports open.spotify.com URLs"
)
logger.error(error_msg) logger.error(error_msg)
raise ValueError(error_msg) raise ValueError(error_msg)
@@ -115,33 +134,40 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
    artist_data = get_spotify_info(artist_id, "artist_discography")

    # Debug logging to inspect the structure of artist_data
    logger.debug(
        f"Artist data structure has keys: {list(artist_data.keys() if isinstance(artist_data, dict) else [])}"
    )

    if not artist_data or "items" not in artist_data:
        raise ValueError(
            f"Failed to retrieve artist data or no albums found for artist ID {artist_id}"
        )

    # Parse the album types to filter by
    allowed_types = [t.strip().lower() for t in album_type.split(",")]
    logger.info(f"Filtering albums by types: {allowed_types}")

    # Filter albums by the specified types
    filtered_albums = []
    for album in artist_data.get("items", []):
        album_type_value = album.get("album_type", "").lower()
        album_group_value = album.get("album_group", "").lower()

        # Apply filtering logic based on album_type and album_group
        if (
            (
                "album" in allowed_types
                and album_type_value == "album"
                and album_group_value == "album"
            )
            or (
                "single" in allowed_types
                and album_type_value == "single"
                and album_group_value == "single"
            )
            or ("compilation" in allowed_types and album_type_value == "compilation")
            or ("appears_on" in allowed_types and album_group_value == "appears_on")
        ):
            filtered_albums.append(album)

    if not filtered_albums:
@@ -158,14 +184,18 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
logger.debug(f"Processing album: {album.get('name', 'Unknown')}") logger.debug(f"Processing album: {album.get('name', 'Unknown')}")
logger.debug(f"Album structure has keys: {list(album.keys())}") logger.debug(f"Album structure has keys: {list(album.keys())}")
external_urls = album.get('external_urls', {}) external_urls = album.get("external_urls", {})
logger.debug(f"Album external_urls: {external_urls}") logger.debug(f"Album external_urls: {external_urls}")
album_url = external_urls.get('spotify', '') album_url = external_urls.get("spotify", "")
album_name = album.get('name', 'Unknown Album') album_name = album.get("name", "Unknown Album")
album_artists = album.get('artists', []) album_artists = album.get("artists", [])
album_artist = album_artists[0].get('name', 'Unknown Artist') if album_artists else 'Unknown Artist' album_artist = (
album_id = album.get('id') album_artists[0].get("name", "Unknown Artist")
if album_artists
else "Unknown Artist"
)
album_id = album.get("id")
logger.debug(f"Extracted album URL: {album_url}") logger.debug(f"Extracted album URL: {album_url}")
logger.debug(f"Extracted album ID: {album_id}") logger.debug(f"Extracted album ID: {album_id}")
@@ -182,11 +212,13 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
"type": "album", "type": "album",
# URL source will be automatically detected in the download functions # URL source will be automatically detected in the download functions
"parent_artist_url": url, "parent_artist_url": url,
"parent_request_type": "artist" "parent_request_type": "artist",
} }
# Include original download URL for this album task # Include original download URL for this album task
album_request_args["original_url"] = url_for('album.handle_download', album_id=album_id, _external=True) album_request_args["original_url"] = url_for(
"album.handle_download", album_id=album_id, _external=True
)
# Create task for this album # Create task for this album
task_data = { task_data = {
@@ -196,11 +228,13 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
"retry_url": album_url, # Use album URL for retry logic, not artist URL "retry_url": album_url, # Use album URL for retry logic, not artist URL
"name": album_name, "name": album_name,
"artist": album_artist, "artist": album_artist,
"orig_request": album_request_args # Store album-specific request params "orig_request": album_request_args, # Store album-specific request params
} }
# Debug log the task data being sent to the queue # Debug log the task data being sent to the queue
logger.debug(f"Album task data: url={task_data['url']}, retry_url={task_data['retry_url']}") logger.debug(
f"Album task data: url={task_data['url']}, retry_url={task_data['retry_url']}"
)
try: try:
task_id = download_queue_manager.add_task(task_data) task_id = download_queue_manager.add_task(task_data)
@@ -208,32 +242,50 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
            # Check the status of the newly added task to see if it was marked as a duplicate error
            last_status = get_last_task_status(task_id)
            if (
                last_status
                and last_status.get("status") == ProgressState.ERROR
                and last_status.get("existing_task_id")
            ):
                logger.warning(
                    f"Album {album_name} (URL: {album_url}) is a duplicate. Error task ID: {task_id}. Existing task ID: {last_status.get('existing_task_id')}"
                )
                duplicate_albums.append(
                    {
                        "name": album_name,
                        "artist": album_artist,
                        "url": album_url,
                        "error_task_id": task_id,  # This is the ID of the task marked as a duplicate error
                        "existing_task_id": last_status.get("existing_task_id"),
                        "message": last_status.get(
                            "message", "Duplicate download attempt."
                        ),
                    }
                )
            else:
                # If not a duplicate error, it was successfully queued (or failed for other reasons handled by add_task)
                # We only add to successfully_queued_albums if it wasn't a duplicate error from add_task
                # Other errors from add_task (like submission failure) would also result in an error status for task_id
                # but won't have 'existing_task_id'. The client can check the status of this task_id.
                album_task_ids.append(
                    task_id
                )  # Keep track of all task_ids returned by add_task
                successfully_queued_albums.append(
                    {
                        "name": album_name,
                        "artist": album_artist,
                        "url": album_url,
                        "task_id": task_id,
                    }
                )
                logger.info(f"Queued album download: {album_name} ({task_id})")
        except Exception as e:  # Catch any other unexpected error from add_task itself (though it should be rare now)
            logger.error(
                f"Failed to queue album {album_name} due to an unexpected error in add_task: {str(e)}"
            )
            # Optionally, collect these errors. For now, just logging and continuing.

    logger.info(
        f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found."
    )
    return successfully_queued_albums, duplicate_albums
@@ -7,43 +7,46 @@ from pathlib import Path
logger = logging.getLogger(__name__)

# Redis configuration - read from environment variables
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = os.getenv("REDIS_PORT", "6379")
REDIS_DB = os.getenv("REDIS_DB", "0")
# Optional Redis password
REDIS_PASSWORD = os.getenv("REDIS_PASSWORD", "")

# Build default URL with password if provided
_password_part = f":{REDIS_PASSWORD}@" if REDIS_PASSWORD else ""
default_redis_url = f"redis://{_password_part}{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}"
REDIS_URL = os.getenv("REDIS_URL", default_redis_url)
REDIS_BACKEND = os.getenv("REDIS_BACKEND", REDIS_URL)

# Log Redis connection details
logger.info(
    f"Redis configuration: REDIS_URL={REDIS_URL}, REDIS_BACKEND={REDIS_BACKEND}"
)

# Config path
CONFIG_FILE_PATH = Path("./data/config/main.json")

DEFAULT_MAIN_CONFIG = {
    "service": "spotify",
    "spotify": "",
    "deezer": "",
    "fallback": False,
    "spotifyQuality": "NORMAL",
    "deezerQuality": "MP3_128",
    "realTime": False,
    "customDirFormat": "%ar_album%/%album%",
    "customTrackFormat": "%tracknum%. %music%",
    "tracknum_padding": True,
    "save_cover": True,
    "maxConcurrentDownloads": 3,
    "maxRetries": 3,
    "retryDelaySeconds": 5,
    "retry_delay_increase": 5,
    "convertTo": None,
    "bitrate": None,
}


def get_config_params():
    """
    Get configuration parameters from the config file.
@@ -59,11 +62,11 @@ def get_config_params():
        if not CONFIG_FILE_PATH.exists():
            logger.info(f"{CONFIG_FILE_PATH} not found. Creating with default values.")
            with open(CONFIG_FILE_PATH, "w") as f:
                json.dump(DEFAULT_MAIN_CONFIG, f, indent=4)
            return DEFAULT_MAIN_CONFIG.copy()  # Return a copy of defaults

        with open(CONFIG_FILE_PATH, "r") as f:
            config = json.load(f)

        # Ensure all default keys are present in the loaded config
@@ -74,48 +77,54 @@ def get_config_params():
                updated = True

        if updated:
            logger.info(
                f"Configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults."
            )
            with open(CONFIG_FILE_PATH, "w") as f:
                json.dump(config, f, indent=4)

        return config
    except Exception as e:
        logger.error(
            f"Error reading or creating config at {CONFIG_FILE_PATH}: {e}",
            exc_info=True,
        )
        # Return defaults if config read/create fails
        return DEFAULT_MAIN_CONFIG.copy()


# Load configuration values we need for Celery
config_params_values = get_config_params()  # Renamed to avoid conflict with module name
MAX_CONCURRENT_DL = config_params_values.get("maxConcurrentDownloads", 3)
MAX_RETRIES = config_params_values.get("maxRetries", 3)
RETRY_DELAY = config_params_values.get("retryDelaySeconds", 5)
RETRY_DELAY_INCREASE = config_params_values.get("retry_delay_increase", 5)

# Define task queues
task_queues = {
    "default": {
        "exchange": "default",
        "routing_key": "default",
    },
    "downloads": {
        "exchange": "downloads",
        "routing_key": "downloads",
    },
    "utility_tasks": {
        "exchange": "utility_tasks",
        "routing_key": "utility_tasks",
    },
}

# Set default queue
task_default_queue = "downloads"
task_default_exchange = "downloads"
task_default_routing_key = "downloads"

# Celery task settings
task_serializer = "json"
accept_content = ["json"]
result_serializer = "json"
enable_utc = True

# Configure worker concurrency based on MAX_CONCURRENT_DL
@@ -123,15 +132,15 @@ worker_concurrency = MAX_CONCURRENT_DL
# Configure task rate limiting - these are per-minute limits
task_annotations = {
    "routes.utils.celery_tasks.download_track": {
        "rate_limit": f"{MAX_CONCURRENT_DL}/m",
    },
    "routes.utils.celery_tasks.download_album": {
        "rate_limit": f"{MAX_CONCURRENT_DL}/m",
    },
    "routes.utils.celery_tasks.download_playlist": {
        "rate_limit": f"{MAX_CONCURRENT_DL}/m",
    },
}

# Configure retry settings
@@ -144,10 +153,10 @@ result_expires = 60 * 60 * 24 * 7 # 7 days
# Configure visibility timeout for task messages
broker_transport_options = {
    "visibility_timeout": 3600,  # 1 hour
    "fanout_prefix": True,
    "fanout_patterns": True,
    "priority_steps": [0, 3, 6, 9],
}

# Important broker connection settings
@@ -1,26 +1,9 @@
import subprocess
import logging
import time
import threading

# Import Celery task utilities
from .celery_config import get_config_params, MAX_CONCURRENT_DL

# Configure logging
@@ -1,33 +1,29 @@
import json
import time
import uuid
import logging
from routes.utils.celery_tasks import (
    download_track,
    download_album,
    download_playlist,
    store_task_status,
    store_task_info,
    get_task_info,
    get_last_task_status,
    cancel_task as cancel_celery_task,
    retry_task as retry_celery_task,
    get_all_tasks,
    ProgressState,
)

# Configure logging
logger = logging.getLogger(__name__)

# Load configuration
CONFIG_PATH = "./data/config/main.json"
try:
    with open(CONFIG_PATH, "r") as f:
        config_data = json.load(f)
    MAX_CONCURRENT_DL = config_data.get("maxConcurrentDownloads", 3)
except Exception as e:
@@ -35,6 +31,7 @@ except Exception as e:
    # Fallback default
    MAX_CONCURRENT_DL = 3


def get_config_params():
    """
    Get common download parameters from the config file.
@@ -44,47 +41,48 @@ def get_config_params():
        dict: A dictionary containing common parameters from config
    """
    try:
        with open(CONFIG_PATH, "r") as f:
            config = json.load(f)

        return {
            "spotify": config.get("spotify", ""),
            "deezer": config.get("deezer", ""),
            "fallback": config.get("fallback", False),
            "spotifyQuality": config.get("spotifyQuality", "NORMAL"),
            "deezerQuality": config.get("deezerQuality", "MP3_128"),
            "realTime": config.get("realTime", False),
            "customDirFormat": config.get("customDirFormat", "%ar_album%/%album%"),
            "customTrackFormat": config.get("customTrackFormat", "%tracknum%. %music%"),
            "tracknum_padding": config.get("tracknum_padding", True),
            "save_cover": config.get("save_cover", True),
            "maxRetries": config.get("maxRetries", 3),
            "retryDelaySeconds": config.get("retryDelaySeconds", 5),
            "retry_delay_increase": config.get("retry_delay_increase", 5),
            "convertTo": config.get("convertTo", None),
            "bitrate": config.get("bitrate", None),
        }
    except Exception as e:
        logger.error(f"Error reading config for parameters: {e}")
        # Return defaults if config read fails
        return {
            "spotify": "",
            "deezer": "",
            "fallback": False,
            "spotifyQuality": "NORMAL",
            "deezerQuality": "MP3_128",
            "realTime": False,
            "customDirFormat": "%ar_album%/%album%",
            "customTrackFormat": "%tracknum%. %music%",
            "tracknum_padding": True,
            "save_cover": True,
            "maxRetries": 3,
            "retryDelaySeconds": 5,
            "retry_delay_increase": 5,
            "convertTo": None,  # Default for conversion
            "bitrate": None,  # Default for bitrate
        }


class CeleryDownloadQueueManager:
    """
    Manages a queue of download tasks using Celery.
@@ -98,7 +96,9 @@ class CeleryDownloadQueueManager:
"""Initialize the Celery-based download queue manager""" """Initialize the Celery-based download queue manager"""
self.max_concurrent = MAX_CONCURRENT_DL self.max_concurrent = MAX_CONCURRENT_DL
self.paused = False self.paused = False
print(f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}") print(
f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}"
)
def add_task(self, task: dict, from_watch_job: bool = False): def add_task(self, task: dict, from_watch_job: bool = False):
""" """
@@ -121,14 +121,16 @@ class CeleryDownloadQueueManager:
            incoming_type = task.get("download_type", "unknown")

            if not incoming_url:
                logger.warning(
                    "Task being added with no URL. Duplicate check might be unreliable."
                )

            NON_BLOCKING_STATES = [
                ProgressState.COMPLETE,
                ProgressState.CANCELLED,
                ProgressState.ERROR,
                ProgressState.ERROR_RETRIED,
                ProgressState.ERROR_AUTO_CLEANED,
            ]

            all_existing_tasks_summary = get_all_tasks()
@@ -148,15 +150,18 @@ class CeleryDownloadQueueManager:
                existing_type = existing_task_info.get("download_type")
                existing_status = existing_last_status_obj.get("status")

                if (
                    existing_url == incoming_url
                    and existing_type == incoming_type
                    and existing_status not in NON_BLOCKING_STATES
                ):
                    message = f"Duplicate download: URL '{incoming_url}' (type: {incoming_type}) is already being processed by task {existing_task_id} (status: {existing_status})."
                    logger.warning(message)

                    if from_watch_job:
                        logger.info(
                            f"Task from watch job for {incoming_url} not queued due to active duplicate {existing_task_id}."
                        )
                        return None  # Skip execution for watch jobs
                    else:
                        # Create a new task_id for this duplicate request and mark it as an error
@@ -167,9 +172,11 @@ class CeleryDownloadQueueManager:
"name": task.get("name", "Duplicate Task"), "name": task.get("name", "Duplicate Task"),
"artist": task.get("artist", ""), "artist": task.get("artist", ""),
"url": incoming_url, "url": incoming_url,
"original_request": task.get("orig_request", task.get("original_request", {})), "original_request": task.get(
"orig_request", task.get("original_request", {})
),
"created_at": time.time(), "created_at": time.time(),
"is_duplicate_error_task": True "is_duplicate_error_task": True,
} }
store_task_info(error_task_id, error_task_info_payload) store_task_info(error_task_id, error_task_info_payload)
error_status_payload = { error_status_payload = {
@@ -179,14 +186,16 @@ class CeleryDownloadQueueManager:
"timestamp": time.time(), "timestamp": time.time(),
"type": error_task_info_payload["type"], "type": error_task_info_payload["type"],
"name": error_task_info_payload["name"], "name": error_task_info_payload["name"],
"artist": error_task_info_payload["artist"] "artist": error_task_info_payload["artist"],
} }
store_task_status(error_task_id, error_status_payload) store_task_status(error_task_id, error_status_payload)
return error_task_id # Return the ID of this new error-state task return error_task_id # Return the ID of this new error-state task
task_id = str(uuid.uuid4()) task_id = str(uuid.uuid4())
config_params = get_config_params() config_params = get_config_params()
original_request = task.get("orig_request", task.get("original_request", {})) original_request = task.get(
"orig_request", task.get("original_request", {})
)
complete_task = { complete_task = {
"download_type": incoming_type, "download_type": incoming_type,
@@ -195,21 +204,42 @@ class CeleryDownloadQueueManager:
"artist": task.get("artist", ""), "artist": task.get("artist", ""),
"url": task.get("url", ""), "url": task.get("url", ""),
"retry_url": task.get("retry_url", ""), "retry_url": task.get("retry_url", ""),
"main": original_request.get("main", config_params['deezer']), "main": original_request.get("main", config_params["deezer"]),
"fallback": original_request.get("fallback", "fallback": original_request.get(
config_params['spotify'] if config_params['fallback'] else None), "fallback",
"quality": original_request.get("quality", config_params['deezerQuality']), config_params["spotify"] if config_params["fallback"] else None,
"fall_quality": original_request.get("fall_quality", config_params['spotifyQuality']), ),
"real_time": self._parse_bool_param(original_request.get("real_time"), config_params['realTime']), "quality": original_request.get(
"custom_dir_format": original_request.get("custom_dir_format", config_params['customDirFormat']), "quality", config_params["deezerQuality"]
"custom_track_format": original_request.get("custom_track_format", config_params['customTrackFormat']), ),
"pad_tracks": self._parse_bool_param(original_request.get("tracknum_padding"), config_params['tracknum_padding']), "fall_quality": original_request.get(
"save_cover": self._parse_bool_param(original_request.get("save_cover"), config_params['save_cover']), "fall_quality", config_params["spotifyQuality"]
"convertTo": original_request.get("convertTo", config_params.get('convertTo')), ),
"bitrate": original_request.get("bitrate", config_params.get('bitrate')), "real_time": self._parse_bool_param(
original_request.get("real_time"), config_params["realTime"]
),
"custom_dir_format": original_request.get(
"custom_dir_format", config_params["customDirFormat"]
),
"custom_track_format": original_request.get(
"custom_track_format", config_params["customTrackFormat"]
),
"pad_tracks": self._parse_bool_param(
original_request.get("tracknum_padding"),
config_params["tracknum_padding"],
),
"save_cover": self._parse_bool_param(
original_request.get("save_cover"), config_params["save_cover"]
),
"convertTo": original_request.get(
"convertTo", config_params.get("convertTo")
),
"bitrate": original_request.get(
"bitrate", config_params.get("bitrate")
),
"retry_count": 0, "retry_count": 0,
"original_request": original_request, "original_request": original_request,
"created_at": time.time() "created_at": time.time(),
} }
# If from_watch_job is True, ensure track_details_for_db is passed through # If from_watch_job is True, ensure track_details_for_db is passed through
@@ -217,20 +247,23 @@ class CeleryDownloadQueueManager:
complete_task["track_details_for_db"] = task["track_details_for_db"] complete_task["track_details_for_db"] = task["track_details_for_db"]
store_task_info(task_id, complete_task) store_task_info(task_id, complete_task)
store_task_status(task_id, { store_task_status(
task_id,
{
"status": ProgressState.QUEUED, "status": ProgressState.QUEUED,
"timestamp": time.time(), "timestamp": time.time(),
"type": complete_task["type"], "type": complete_task["type"],
"name": complete_task["name"], "name": complete_task["name"],
"artist": complete_task["artist"], "artist": complete_task["artist"],
"retry_count": 0, "retry_count": 0,
"queue_position": len(get_all_tasks()) + 1 "queue_position": len(get_all_tasks()) + 1,
}) },
)
celery_task_map = { celery_task_map = {
"track": download_track, "track": download_track,
"album": download_album, "album": download_album,
"playlist": download_playlist "playlist": download_playlist,
} }
task_func = celery_task_map.get(incoming_type) task_func = celery_task_map.get(incoming_type)
@@ -238,30 +271,38 @@ class CeleryDownloadQueueManager:
                task_func.apply_async(
                    kwargs=complete_task,
                    task_id=task_id,
                    countdown=0 if not self.paused else 3600,
                )

                logger.info(
                    f"Added {incoming_type} download task {task_id} to Celery queue."
                )
                return task_id
            else:
                store_task_status(
                    task_id,
                    {
                        "status": ProgressState.ERROR,
                        "message": f"Unsupported download type: {incoming_type}",
                        "timestamp": time.time(),
                    },
                )
                logger.error(f"Unsupported download type: {incoming_type}")
                return task_id
        except Exception as e:
            logger.error(f"Error adding task to Celery queue: {e}", exc_info=True)
            error_task_id = str(uuid.uuid4())
            store_task_status(
                error_task_id,
                {
                    "status": ProgressState.ERROR,
                    "message": f"Error adding task to queue: {str(e)}",
                    "timestamp": time.time(),
                    "type": task.get("type", "unknown"),
                    "name": task.get("name", "Unknown"),
                    "artist": task.get("artist", ""),
                },
            )
            return error_task_id
    def _parse_bool_param(self, param_value, default_value=False):
@@ -271,7 +312,7 @@ class CeleryDownloadQueueManager:
        if isinstance(param_value, bool):
            return param_value
        if isinstance(param_value, str):
            return param_value.lower() in ["true", "1", "yes", "y", "on"]
        return bool(param_value)
    def cancel_task(self, task_id):
@@ -321,7 +362,7 @@ class CeleryDownloadQueueManager:
        return {
            "status": "all_cancelled",
            "cancelled_count": cancelled_count,
            "total_tasks": len(tasks),
        }
    def get_queue_status(self):
@@ -346,32 +387,35 @@ class CeleryDownloadQueueManager:
            if status == ProgressState.PROCESSING:
                running_count += 1
                running_tasks.append(
                    {
                        "task_id": task.get("task_id"),
                        "name": task.get("name", "Unknown"),
                        "type": task.get("type", "unknown"),
                        "download_type": task.get("download_type", "unknown"),
                    }
                )
            elif status == ProgressState.QUEUED:
                pending_count += 1
            elif status == ProgressState.ERROR:
                failed_count += 1
                # Get task info for retry information
                last_status = get_last_task_status(task.get("task_id"))
                retry_count = 0
                if last_status:
                    retry_count = last_status.get("retry_count", 0)

                failed_tasks.append(
                    {
                        "task_id": task.get("task_id"),
                        "name": task.get("name", "Unknown"),
                        "type": task.get("type", "unknown"),
                        "download_type": task.get("download_type", "unknown"),
                        "retry_count": retry_count,
                    }
                )

        return {
            "running": running_count,
@@ -380,7 +424,7 @@ class CeleryDownloadQueueManager:
"max_concurrent": self.max_concurrent, "max_concurrent": self.max_concurrent,
"paused": self.paused, "paused": self.paused,
"running_tasks": running_tasks, "running_tasks": running_tasks,
"failed_tasks": failed_tasks "failed_tasks": failed_tasks,
} }
    def pause(self):
@@ -392,12 +436,15 @@ class CeleryDownloadQueueManager:
        for task in tasks:
            if task.get("status") == ProgressState.QUEUED:
                # Update status to indicate the task is paused
                store_task_status(
                    task.get("task_id"),
                    {
                        "status": ProgressState.QUEUED,
                        "paused": True,
                        "message": "Queue is paused, task will run when queue is resumed",
                        "timestamp": time.time(),
                    },
                )

        logger.info("Download queue processing paused")
        return {"status": "paused"}
@@ -418,31 +465,25 @@ class CeleryDownloadQueueManager:
                    continue

                # Update status to indicate the task is no longer paused
                store_task_status(
                    task_id,
                    {
                        "status": ProgressState.QUEUED,
                        "paused": False,
                        "message": "Queue resumed, task will run soon",
                        "timestamp": time.time(),
                    },
                )

                # Reschedule the task to run immediately
                download_type = task_info.get("download_type", "unknown")
                if download_type == "track":
                    download_track.apply_async(kwargs=task_info, task_id=task_id)
                elif download_type == "album":
                    download_album.apply_async(kwargs=task_info, task_id=task_id)
                elif download_type == "playlist":
                    download_playlist.apply_async(kwargs=task_info, task_id=task_id)

        logger.info("Download queue processing resumed")
        return {"status": "resumed"}
@@ -462,5 +503,6 @@ class CeleryDownloadQueueManager:
logger.info("Celery Download Queue Manager stopped") logger.info("Celery Download Queue Manager stopped")
return {"status": "stopped"} return {"status": "stopped"}
# Create the global instance # Create the global instance
download_queue_manager = CeleryDownloadQueueManager() download_queue_manager = CeleryDownloadQueueManager()
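
A hedged usage sketch of the module-level manager; pause() and the queue-status keys appear in this diff, while the resume() name is inferred from the {"status": "resumed"} payload above:

from routes.utils.celery_queue_manager import download_queue_manager

download_queue_manager.pause()    # queued tasks are marked paused and held (countdown=3600)
download_queue_manager.resume()   # held tasks are rescheduled via apply_async immediately
status = download_queue_manager.get_queue_status()
print(status["running"], status["paused"], len(status["failed_tasks"]))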


@@ -1,9 +1,7 @@
import time
import json
import logging
import traceback
from celery import Celery, Task, states
from celery.signals import (
    task_prerun,
@@ -14,17 +12,13 @@ from celery.signals import (
    setup_logging,
)
from celery.exceptions import Retry
from pathlib import Path  # Added for path operations

# Setup Redis and Celery
from routes.utils.celery_config import (
    REDIS_URL,
    REDIS_BACKEND,
    get_config_params,
)
@@ -37,6 +31,12 @@ from routes.utils.watch.db import (
# Import history manager function
from .history_manager import add_entry_to_history
# Create Redis connection for storing task data that's not part of the Celery result backend
import redis
# Configure logging
logger = logging.getLogger(__name__)
# Initialize Celery app
celery_app = Celery(
    "routes.utils.celery_tasks", broker=REDIS_URL, backend=REDIS_BACKEND
@@ -45,8 +45,6 @@ celery_app = Celery(
# Load Celery config
celery_app.config_from_object("routes.utils.celery_config")

redis_client = redis.Redis.from_url(REDIS_URL)
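
For orientation, a hedged sketch of the pattern this module uses: per-task metadata and status updates live in plain Redis keys next to the Celery result backend. The key layout below is an assumption for illustration; the real store_task_status()/get_last_task_status() helpers may differ.

import json
import time

def put_status(client, task_id: str, status: dict) -> None:
    # Append a timestamped status object to a per-task list (key name hypothetical).
    status["timestamp"] = time.time()
    client.rpush(f"task_status:{task_id}", json.dumps(status))

def last_status(client, task_id: str):
    # Read the most recent status object, or None if the task is unknown.
    raw = client.lindex(f"task_status:{task_id}", -1)
    return json.loads(raw) if raw else None

last = last_status(redis_client, "some-task-id")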


@@ -2,7 +2,6 @@ import json
from pathlib import Path
import shutil
import sqlite3
import time  # For retry delays
import logging
@@ -14,17 +13,17 @@ import logging
logger = logging.getLogger(__name__)  # Assuming logger is configured elsewhere

# --- New Database and Path Definitions ---
CREDS_BASE_DIR = Path("./data/creds")
ACCOUNTS_DB_PATH = CREDS_BASE_DIR / "accounts.db"
BLOBS_DIR = CREDS_BASE_DIR / "blobs"
GLOBAL_SEARCH_JSON_PATH = CREDS_BASE_DIR / "search.json"  # Global Spotify API creds

EXPECTED_SPOTIFY_TABLE_COLUMNS = {
    "name": "TEXT PRIMARY KEY",
    # client_id and client_secret are now global
    "region": "TEXT",  # ISO 3166-1 alpha-2
    "created_at": "REAL",
    "updated_at": "REAL",
}

EXPECTED_DEEZER_TABLE_COLUMNS = {
@@ -32,9 +31,10 @@ EXPECTED_DEEZER_TABLE_COLUMNS = {
"arl": "TEXT", "arl": "TEXT",
"region": "TEXT", # ISO 3166-1 alpha-2 "region": "TEXT", # ISO 3166-1 alpha-2
"created_at": "REAL", "created_at": "REAL",
"updated_at": "REAL" "updated_at": "REAL",
} }
def _get_db_connection():
    ACCOUNTS_DB_PATH.parent.mkdir(parents=True, exist_ok=True)
    BLOBS_DIR.mkdir(parents=True, exist_ok=True)  # Ensure blobs directory also exists
@@ -42,7 +42,10 @@ def _get_db_connection():
    conn.row_factory = sqlite3.Row
    return conn
def _ensure_table_schema(
    cursor: sqlite3.Cursor, table_name: str, expected_columns: dict
):
"""Ensures the given table has all expected columns, adding them if necessary.""" """Ensures the given table has all expected columns, adding them if necessary."""
try: try:
cursor.execute(f"PRAGMA table_info({table_name})") cursor.execute(f"PRAGMA table_info({table_name})")
@@ -53,17 +56,21 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum
        for col_name, col_type in expected_columns.items():
            if col_name not in existing_column_names:
                # Basic protection against altering PK after creation if table is not empty
                if "PRIMARY KEY" in col_type.upper() and existing_columns_info:
                    logger.warning(
                        f"Column '{col_name}' is part of PRIMARY KEY for table '{table_name}' "
                        f"and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN."
                    )
                    continue

                col_type_for_add = col_type.replace(" PRIMARY KEY", "").strip()
                try:
                    cursor.execute(
                        f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_for_add}"
                    )
                    logger.info(
                        f"Added missing column '{col_name} {col_type_for_add}' to table '{table_name}'."
                    )
                    added_columns = True
                except sqlite3.OperationalError as alter_e:
                    logger.warning(
@@ -72,9 +79,12 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum
                    )
        return added_columns
    except sqlite3.Error as e:
        logger.error(
            f"Error ensuring schema for table '{table_name}': {e}", exc_info=True
        )
        return False
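
A hedged usage sketch of the migration helper above; the extra "label" column is purely illustrative:

with _get_db_connection() as conn:
    cursor = conn.cursor()
    # Merge a hypothetical new column into the expected schema and apply it.
    schema = {**EXPECTED_DEEZER_TABLE_COLUMNS, "label": "TEXT"}
    if _ensure_table_schema(cursor, "deezer", schema):
        conn.commit()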
def init_credentials_db():
    """Initializes the accounts.db and its tables if they don't exist."""
    try:
@@ -105,46 +115,72 @@ def init_credentials_db():
        # Ensure global search.json exists, create if not
        if not GLOBAL_SEARCH_JSON_PATH.exists():
            logger.info(
                f"Global Spotify search credential file not found at {GLOBAL_SEARCH_JSON_PATH}. Creating empty file."
            )
            with open(GLOBAL_SEARCH_JSON_PATH, "w") as f_search:
                json.dump(
                    {"client_id": "", "client_secret": ""}, f_search, indent=4
                )

        conn.commit()
        logger.info(
            f"Credentials database initialized/schema checked at {ACCOUNTS_DB_PATH}"
        )
    except sqlite3.Error as e:
        logger.error(f"Error initializing credentials database: {e}", exc_info=True)
        raise
def _get_global_spotify_api_creds():
    """Loads client_id and client_secret from the global search.json."""
    if GLOBAL_SEARCH_JSON_PATH.exists():
        try:
            with open(GLOBAL_SEARCH_JSON_PATH, "r") as f:
                search_data = json.load(f)
            client_id = search_data.get("client_id")
            client_secret = search_data.get("client_secret")
            if client_id and client_secret:
                return client_id, client_secret
            else:
                logger.warning(
                    f"Global Spotify API credentials in {GLOBAL_SEARCH_JSON_PATH} are incomplete."
                )
        except Exception as e:
            logger.error(
                f"Error reading global Spotify API credentials from {GLOBAL_SEARCH_JSON_PATH}: {e}",
                exc_info=True,
            )
    else:
        logger.warning(
            f"Global Spotify API credential file {GLOBAL_SEARCH_JSON_PATH} not found."
        )
    return (
        None,
        None,
    )  # Return None if file doesn't exist or creds are incomplete/invalid
def save_global_spotify_api_creds(client_id: str, client_secret: str):
    """Saves client_id and client_secret to the global search.json."""
    try:
        GLOBAL_SEARCH_JSON_PATH.parent.mkdir(parents=True, exist_ok=True)
        with open(GLOBAL_SEARCH_JSON_PATH, "w") as f:
            json.dump(
                {"client_id": client_id, "client_secret": client_secret}, f, indent=4
            )
        logger.info(
            f"Global Spotify API credentials saved to {GLOBAL_SEARCH_JSON_PATH}"
        )
        return True
    except Exception as e:
        logger.error(
            f"Error saving global Spotify API credentials to {GLOBAL_SEARCH_JSON_PATH}: {e}",
            exc_info=True,
        )
        return False
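
A round-trip sketch using the two helpers above (credential values are placeholders):

# Persist the global Spotify app credentials, then read them back.
if save_global_spotify_api_creds("example-client-id", "example-client-secret"):
    client_id, client_secret = _get_global_spotify_api_creds()
    assert (client_id, client_secret) == ("example-client-id", "example-client-secret")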
def _validate_with_retry(service_name, account_name, validation_data):
    """
    Attempts to validate credentials with retries for connection errors.
@@ -161,51 +197,76 @@ def _validate_with_retry(service_name, account_name, validation_data):
    for attempt in range(max_retries):
        try:
            if service_name == "spotify":
                # For Spotify, validation uses the account's blob and GLOBAL API creds
                global_client_id, global_client_secret = _get_global_spotify_api_creds()
                if not global_client_id or not global_client_secret:
                    raise ValueError(
                        "Global Spotify API client_id or client_secret not configured for validation."
                    )
                blob_file_path = validation_data.get("blob_file_path")
                if not blob_file_path or not Path(blob_file_path).exists():
                    raise ValueError(
                        f"Spotify blob file missing for validation of account {account_name}"
                    )
                SpoLogin(
                    credentials_path=str(blob_file_path),
                    spotify_client_id=global_client_id,
                    spotify_client_secret=global_client_secret,
                )
            else:  # Deezer
                arl = validation_data.get("arl")
                if not arl:
                    raise ValueError("Missing 'arl' for Deezer validation.")
                DeeLogin(arl=arl)
            logger.info(
                f"{service_name.capitalize()} credentials for {account_name} validated successfully (attempt {attempt + 1})."
            )
            return True
        except Exception as e:
            last_exception = e
            error_str = str(e).lower()
            is_connection_error = (
                "connection refused" in error_str
                or "connection error" in error_str
                or "timeout" in error_str
                or "temporary failure in name resolution" in error_str
                or "dns lookup failed" in error_str
                or "network is unreachable" in error_str
                or "ssl handshake failed" in error_str
                or "connection reset by peer" in error_str
            )
            if is_connection_error and attempt < max_retries - 1:
                retry_delay = 2 + attempt
                logger.warning(
                    f"Validation for {account_name} ({service_name}) failed (attempt {attempt + 1}) due to connection issue: {e}. Retrying in {retry_delay}s..."
                )
                time.sleep(retry_delay)
                continue
            else:
                logger.error(
                    f"Validation for {account_name} ({service_name}) failed on attempt {attempt + 1} (non-retryable or max retries)."
                )
                break

    if last_exception:
        base_error_message = str(last_exception).splitlines()[-1]
        detailed_error_message = f"Invalid {service_name} credentials for {account_name}. Verification failed: {base_error_message}"
        if (
            service_name == "spotify"
            and "incorrect padding" in base_error_message.lower()
        ):
            detailed_error_message += (
                ". Hint: For Spotify, ensure the credentials blob content is correct."
            )
        raise ValueError(detailed_error_message)
    else:
        raise ValueError(
            f"Invalid {service_name} credentials for {account_name}. Verification failed (unknown reason after retries)."
        )
def create_credential(service, name, data):
@@ -219,7 +280,7 @@ def create_credential(service, name, data):
    Raises:
        ValueError, FileExistsError
    """
    if service not in ["spotify", "deezer"]:
        raise ValueError("Service must be 'spotify' or 'deezer'")
    if not name or not isinstance(name, str):
        raise ValueError("Credential name must be a non-empty string.")
@@ -230,43 +291,56 @@ def create_credential(service, name, data):
        cursor = conn.cursor()
        conn.row_factory = sqlite3.Row
        try:
            if service == "spotify":
                required_fields = {
                    "region",
                    "blob_content",
                }  # client_id/secret are global
                if not required_fields.issubset(data.keys()):
                    raise ValueError(
                        f"Missing fields for Spotify. Required: {required_fields}"
                    )

                blob_path = BLOBS_DIR / name / "credentials.json"
                validation_data = {
                    "blob_file_path": str(blob_path)
                }  # Validation uses global API creds

                blob_path.parent.mkdir(parents=True, exist_ok=True)
                with open(blob_path, "w") as f_blob:
                    if isinstance(data["blob_content"], dict):
                        json.dump(data["blob_content"], f_blob, indent=4)
                    else:  # assume string
                        f_blob.write(data["blob_content"])

                try:
                    _validate_with_retry("spotify", name, validation_data)
                    cursor.execute(
                        "INSERT INTO spotify (name, region, created_at, updated_at) VALUES (?, ?, ?, ?)",
                        (name, data["region"], current_time, current_time),
                    )
                except Exception:
                    if blob_path.exists():
                        blob_path.unlink()  # Cleanup blob
                    if blob_path.parent.exists() and not any(
                        blob_path.parent.iterdir()
                    ):
                        blob_path.parent.rmdir()
                    raise  # Re-raise validation or DB error

            elif service == "deezer":
                required_fields = {"arl", "region"}
                if not required_fields.issubset(data.keys()):
                    raise ValueError(
                        f"Missing fields for Deezer. Required: {required_fields}"
                    )
                validation_data = {"arl": data["arl"]}
                _validate_with_retry("deezer", name, validation_data)
                cursor.execute(
                    "INSERT INTO deezer (name, arl, region, created_at, updated_at) VALUES (?, ?, ?, ?, ?)",
                    (name, data["arl"], data["region"], current_time, current_time),
                )

            conn.commit()
            logger.info(f"Credential '{name}' for {service} created successfully.")
@@ -274,7 +348,9 @@ def create_credential(service, name, data):
        except sqlite3.IntegrityError:
            raise FileExistsError(f"Credential '{name}' already exists for {service}.")
        except Exception as e:
            logger.error(
                f"Error creating credential {name} for {service}: {e}", exc_info=True
            )
            raise ValueError(f"Could not create credential: {e}")
@@ -285,7 +361,7 @@ def get_credential(service, name):
    For Deezer, returns dict with name, arl, and region.
    Raises FileNotFoundError if the credential does not exist.
    """
    if service not in ["spotify", "deezer"]:
        raise ValueError("Service must be 'spotify' or 'deezer'")

    with _get_db_connection() as conn:
@@ -299,53 +375,62 @@ def get_credential(service, name):
            data = dict(row)
            if service == "spotify":
                blob_file_path = BLOBS_DIR / name / "credentials.json"
                data["blob_file_path"] = str(blob_file_path)  # Keep for internal use
                try:
                    with open(blob_file_path, "r") as f_blob:
                        blob_data = json.load(f_blob)
                    data["blob_content"] = blob_data
                except FileNotFoundError:
                    logger.warning(
                        f"Spotify blob file not found for {name} at {blob_file_path} during get_credential."
                    )
                    data["blob_content"] = None
                except json.JSONDecodeError:
                    logger.warning(
                        f"Error decoding JSON from Spotify blob file for {name} at {blob_file_path}."
                    )
                    data["blob_content"] = None
                except Exception as e:
                    logger.error(
                        f"Unexpected error reading Spotify blob for {name}: {e}",
                        exc_info=True,
                    )
                    data["blob_content"] = None

                cleaned_data = {
                    "name": data.get("name"),
                    "region": data.get("region"),
                    "blob_content": data.get("blob_content"),
                }
                return cleaned_data
            elif service == "deezer":
                cleaned_data = {
                    "name": data.get("name"),
                    "region": data.get("region"),
                    "arl": data.get("arl"),
                }
                return cleaned_data

            # Fallback, should not be reached if service is spotify or deezer
            return None
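
For reference, a sketch of the shapes returned above (the account name is a placeholder):

spotify_cred = get_credential("spotify", "main")
# -> {"name": ..., "region": ..., "blob_content": ...}  (blob_content is None if unreadable)
deezer_cred = get_credential("deezer", "main")
# -> {"name": ..., "region": ..., "arl": ...}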
def list_credentials(service):
    if service not in ["spotify", "deezer"]:
        raise ValueError("Service must be 'spotify' or 'deezer'")

    with _get_db_connection() as conn:
        cursor = conn.cursor()
        conn.row_factory = sqlite3.Row
        cursor.execute(f"SELECT name FROM {service}")
        return [row["name"] for row in cursor.fetchall()]
def delete_credential(service, name):
    if service not in ["spotify", "deezer"]:
        raise ValueError("Service must be 'spotify' or 'deezer'")

    with _get_db_connection() as conn:
@@ -355,7 +440,7 @@ def delete_credential(service, name):
        if cursor.rowcount == 0:
            raise FileNotFoundError(f"Credential '{name}' not found for {service}.")

        if service == "spotify":
            blob_dir = BLOBS_DIR / name
            if blob_dir.exists():
                shutil.rmtree(blob_dir)

        logger.info(f"Credential '{name}' for {service} deleted.")
        return {"status": "deleted", "service": service, "name": name}
def edit_credential(service, name, new_data):
    """
    Edits an existing credential.
@@ -370,14 +456,16 @@ def edit_credential(service, name, new_data):
    new_data for Deezer can include: arl, region.
    Fields not in new_data remain unchanged.
    """
    if service not in ["spotify", "deezer"]:
        raise ValueError("Service must be 'spotify' or 'deezer'")

    current_time = time.time()
    # Fetch existing data first to preserve unchanged fields and for validation backup
    try:
        existing_cred = get_credential(
            service, name
        )  # This will raise FileNotFoundError if not found
    except FileNotFoundError:
        raise
    except Exception as e:  # Catch other errors from get_credential
@@ -389,75 +477,100 @@ def edit_credential(service, name, new_data):
        cursor = conn.cursor()
        conn.row_factory = sqlite3.Row

        if service == "spotify":
            # Prepare data for DB update
            db_update_data = {
                "region": updated_fields.get("region", existing_cred["region"]),
                "updated_at": current_time,
                "name": name,  # for WHERE clause
            }

            blob_path = Path(existing_cred["blob_file_path"])  # Use path from existing
            original_blob_content = None
            if blob_path.exists():
                with open(blob_path, "r") as f_orig_blob:
                    original_blob_content = f_orig_blob.read()

            # If blob_content is being updated, write it temporarily for validation
            if "blob_content" in updated_fields:
                blob_path.parent.mkdir(parents=True, exist_ok=True)
                with open(blob_path, "w") as f_new_blob:
                    if isinstance(updated_fields["blob_content"], dict):
                        json.dump(updated_fields["blob_content"], f_new_blob, indent=4)
                    else:
                        f_new_blob.write(updated_fields["blob_content"])

            validation_data = {"blob_file_path": str(blob_path)}
            try:
                _validate_with_retry("spotify", name, validation_data)
                set_clause = ", ".join(
                    [f"{key} = ?" for key in db_update_data if key != "name"]
                )
                values = [
                    db_update_data[key] for key in db_update_data if key != "name"
                ] + [name]
                cursor.execute(
                    f"UPDATE spotify SET {set_clause} WHERE name = ?", tuple(values)
                )
                # If validation passed and blob was in new_data, it's already written.
                # If blob_content was NOT in new_data, the existing blob (if any) remains.
            except Exception:
                # Revert blob if it was changed and validation failed
                if (
                    "blob_content" in updated_fields
                    and original_blob_content is not None
                ):
                    with open(blob_path, "w") as f_revert_blob:
                        f_revert_blob.write(original_blob_content)
                elif (
                    "blob_content" in updated_fields
                    and original_blob_content is None
                    and blob_path.exists()
                ):
                    # If new blob was written but there was no original to revert to, delete the new one.
                    blob_path.unlink()
                raise  # Re-raise validation or DB error

        elif service == "deezer":
            db_update_data = {
                "arl": updated_fields.get("arl", existing_cred["arl"]),
                "region": updated_fields.get("region", existing_cred["region"]),
                "updated_at": current_time,
                "name": name,  # for WHERE clause
            }
            validation_data = {"arl": db_update_data["arl"]}
            _validate_with_retry(
                "deezer", name, validation_data
            )  # Validation happens before DB write for Deezer

            set_clause = ", ".join(
                [f"{key} = ?" for key in db_update_data if key != "name"]
            )
            values = [
                db_update_data[key] for key in db_update_data if key != "name"
            ] + [name]
            cursor.execute(
                f"UPDATE deezer SET {set_clause} WHERE name = ?", tuple(values)
            )

        if cursor.rowcount == 0:  # Should not happen if get_credential succeeded
            raise FileNotFoundError(
                f"Credential '{name}' for {service} disappeared during edit."
            )

        conn.commit()
        logger.info(f"Credential '{name}' for {service} updated successfully.")
        return {"status": "updated", "service": service, "name": name}
# --- Helper for credential file path (mainly for Spotify blob) ---
def get_spotify_blob_path(account_name: str) -> Path:
    return BLOBS_DIR / account_name / "credentials.json"
# It's good practice to call init_credentials_db() when the app starts.
# This can be done in the main application setup. For now, defining it here.
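
A hedged end-to-end sketch of the module API above (names and values are placeholders); note that fields omitted from new_data are preserved:

init_credentials_db()
edit_credential("deezer", "main", {"region": "FR"})  # ARL stays unchanged
print(list_credentials("deezer"))
delete_credential("deezer", "main")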


@@ -1,8 +1,4 @@
from deezspot.easy_spoty import Spo
from routes.utils.celery_queue_manager import get_config_params
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds
@@ -13,7 +9,6 @@ import logging
# Initialize logger
logger = logging.getLogger(__name__)
def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
    """
@@ -32,18 +27,24 @@ def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
    client_id, client_secret = _get_global_spotify_api_creds()
    if not client_id or not client_secret:
        raise ValueError(
            "Global Spotify API client_id or client_secret not configured in ./data/creds/search.json."
        )

    # Get config parameters including default Spotify account name
    # This might still be useful if Spo uses the account name for other things (e.g. market/region if not passed explicitly)
    # For now, we are just ensuring the API keys are set.
    config_params = get_config_params()
    main_spotify_account_name = config_params.get(
        "spotify", ""
    )  # Still good to know which account is 'default' contextually

    if not main_spotify_account_name:
        # This is less critical now that API keys are global, but could indicate a misconfiguration
        # if other parts of Spo expect an account context.
        print(
            "WARN: No default Spotify account name configured in settings (main.json). API calls will use global keys."
        )
    else:
        # Optionally, one could load the specific account's region here if Spo.init or methods need it,
        # but easy_spoty's Spo doesn't seem to take region directly in __init__.
@@ -51,12 +52,16 @@ def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
        try:
            # We call get_credential just to check if the account exists,
            # not for client_id/secret anymore for Spo.__init__
            get_credential("spotify", main_spotify_account_name)
        except FileNotFoundError:
            # This is a more serious warning if an account is expected to exist.
            print(
                f"WARN: Default Spotify account '{main_spotify_account_name}' configured in main.json was not found in credentials database."
            )
        except Exception as e:
            print(
                f"WARN: Error accessing default Spotify account '{main_spotify_account_name}': {e}"
            )

    # Initialize the Spotify client with GLOBAL credentials
    Spo.__init__(client_id, client_secret)
@@ -83,6 +88,7 @@ def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
    else:
        raise ValueError(f"Unsupported Spotify type: {spotify_type}")
def get_deezer_info(deezer_id, deezer_type, limit=None):
    """
    Get info from Deezer API.
@@ -103,7 +109,9 @@ def get_deezer_info(deezer_id, deezer_type, limit=None):
        ValueError: If deezer_type is unsupported.
        Various exceptions from DeezerAPI (NoDataApi, QuotaExceeded, requests.exceptions.RequestException, etc.)
    """
    logger.debug(
        f"Fetching Deezer info for ID {deezer_id}, type {deezer_type}, limit {limit}"
    )
    # DeezerAPI uses class methods; its @classmethod __init__ handles setup.
    # No specific ARL or account handling here as DeezerAPI seems to use general endpoints.
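
A hedged usage sketch of the two lookup helpers above; the IDs are placeholders and the type strings ("track", "album") are assumptions consistent with the error messages in this file:

track_info = get_spotify_info("4uLU6hMCjMI75M1A2tKUQC", "track")
album_info = get_deezer_info("302127", "album")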


@@ -6,29 +6,30 @@ from pathlib import Path
logger = logging.getLogger(__name__)
HISTORY_DIR = Path("./data/history")
HISTORY_DB_FILE = HISTORY_DIR / "download_history.db"

EXPECTED_COLUMNS = {
    "task_id": "TEXT PRIMARY KEY",
    "download_type": "TEXT",
    "item_name": "TEXT",
    "item_artist": "TEXT",
    "item_album": "TEXT",
    "item_url": "TEXT",
    "spotify_id": "TEXT",
    "status_final": "TEXT",  # 'COMPLETED', 'ERROR', 'CANCELLED'
    "error_message": "TEXT",
    "timestamp_added": "REAL",
    "timestamp_completed": "REAL",
    "original_request_json": "TEXT",
    "last_status_obj_json": "TEXT",
    "service_used": "TEXT",
    "quality_profile": "TEXT",
    "convert_to": "TEXT",
    "bitrate": "TEXT",
}
def init_history_db():
    """Initializes the download history database, creates the table if it doesn't exist,
    and adds any missing columns to an existing table."""
@@ -42,7 +43,7 @@ def init_history_db():
        # The primary key constraint is handled by the initial CREATE TABLE.
        # If 'task_id' is missing, it cannot be added as PRIMARY KEY to an existing table
        # without complex migrations. We assume 'task_id' will exist if the table exists.
        create_table_sql = """
        CREATE TABLE IF NOT EXISTS download_history (
            task_id TEXT PRIMARY KEY,
            download_type TEXT,
@@ -74,42 +75,54 @@ def init_history_db():
        added_columns = False
        for col_name, col_type in EXPECTED_COLUMNS.items():
            if col_name not in existing_column_names:
                if "PRIMARY KEY" in col_type.upper() and col_name == "task_id":
                    # This case should be handled by CREATE TABLE, but as a safeguard:
                    # If task_id is somehow missing and table exists, this is a problem.
                    # Adding it as PK here is complex and might fail if data exists.
                    # For now, we assume CREATE TABLE handles the PK.
                    # If we were to add it, it would be 'ALTER TABLE download_history ADD COLUMN task_id TEXT;'
                    # and then potentially a separate step to make it PK if table is empty, which is non-trivial.
                    logger.warning(
                        f"Column '{col_name}' is part of PRIMARY KEY and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN."
                    )
                    continue

                # For other columns, just add them.
                # Remove PRIMARY KEY from type definition if present, as it's only for table creation.
                col_type_for_add = col_type.replace(" PRIMARY KEY", "").strip()
                try:
                    cursor.execute(
                        f"ALTER TABLE download_history ADD COLUMN {col_name} {col_type_for_add}"
                    )
                    logger.info(
                        f"Added missing column '{col_name} {col_type_for_add}' to download_history table."
                    )
                    added_columns = True
                except sqlite3.OperationalError as alter_e:
                    # This might happen if a column (e.g. task_id) without "PRIMARY KEY" is added by this loop
                    # but the initial create table already made it a primary key.
                    # Or other more complex scenarios.
                    logger.warning(
                        f"Could not add column '{col_name}': {alter_e}. It might already exist or there's a schema mismatch."
                    )

        if added_columns:
            conn.commit()
            logger.info(f"Download history table schema updated at {HISTORY_DB_FILE}")
        else:
            logger.info(
                f"Download history database schema is up-to-date at {HISTORY_DB_FILE}"
            )

    except sqlite3.Error as e:
        logger.error(
            f"Error initializing download history database: {e}", exc_info=True
        )
    finally:
        if conn:
            conn.close()
def add_entry_to_history(history_data: dict):
    """Adds or replaces an entry in the download_history table.
@@ -118,11 +131,23 @@ def add_entry_to_history(history_data: dict):
    Expected keys match the table columns.
    """
    required_keys = [
        "task_id",
        "download_type",
        "item_name",
        "item_artist",
        "item_album",
        "item_url",
        "spotify_id",
        "status_final",
        "error_message",
        "timestamp_added",
        "timestamp_completed",
        "original_request_json",
        "last_status_obj_json",
        "service_used",
        "quality_profile",
        "convert_to",
        "bitrate",
    ]
    # Ensure all keys are present, filling with None if not
    for key in required_keys:
@@ -132,7 +157,8 @@ def add_entry_to_history(history_data: dict):
    try:
        conn = sqlite3.connect(HISTORY_DB_FILE)
        cursor = conn.cursor()
        cursor.execute(
            """
            INSERT OR REPLACE INTO download_history (
                task_id, download_type, item_name, item_artist, item_album,
                item_url, spotify_id, status_final, error_message,
@@ -140,26 +166,49 @@ def add_entry_to_history(history_data: dict):
                last_status_obj_json, service_used, quality_profile,
                convert_to, bitrate
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            (
                history_data["task_id"],
                history_data["download_type"],
                history_data["item_name"],
                history_data["item_artist"],
                history_data["item_album"],
                history_data["item_url"],
                history_data["spotify_id"],
                history_data["status_final"],
                history_data["error_message"],
                history_data["timestamp_added"],
                history_data["timestamp_completed"],
                history_data["original_request_json"],
                history_data["last_status_obj_json"],
                history_data["service_used"],
                history_data["quality_profile"],
                history_data["convert_to"],
                history_data["bitrate"],
            ),
        )
        conn.commit()
        logger.info(
            f"Added/Updated history for task_id: {history_data['task_id']}, status: {history_data['status_final']}"
        )
    except sqlite3.Error as e:
        logger.error(
            f"Error adding entry to download history for task_id {history_data.get('task_id')}: {e}",
            exc_info=True,
        )
    except Exception as e:
        logger.error(
            f"Unexpected error adding to history for task_id {history_data.get('task_id')}: {e}",
            exc_info=True,
        )
    finally:
        if conn:
            conn.close()
def get_history_entries(
    limit=25, offset=0, sort_by="timestamp_completed", sort_order="DESC", filters=None
):
"""Retrieves entries from the download_history table with pagination, sorting, and filtering. """Retrieves entries from the download_history table with pagination, sorting, and filtering.
Args: Args:
@@ -189,7 +238,7 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_
    if filters:
        for column, value in filters.items():
            # Basic security: ensure column is a valid one (alphanumeric + underscore)
            if column.replace("_", "").isalnum():
                where_clauses.append(f"{column} = ?")
                params.append(value)
@@ -204,16 +253,26 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_
    # Validate sort_by and sort_order to prevent SQL injection
    valid_sort_columns = [
        "task_id",
        "download_type",
        "item_name",
        "item_artist",
        "item_album",
        "item_url",
        "status_final",
        "timestamp_added",
        "timestamp_completed",
        "service_used",
        "quality_profile",
        "convert_to",
        "bitrate",
    ]
    if sort_by not in valid_sort_columns:
        sort_by = "timestamp_completed"  # Default sort

    sort_order_upper = sort_order.upper()
    if sort_order_upper not in ["ASC", "DESC"]:
        sort_order_upper = "DESC"

    select_query += f" ORDER BY {sort_by} {sort_order_upper} LIMIT ? OFFSET ?"
    params.extend([limit, offset])
@@ -232,72 +291,79 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_
        if conn:
            conn.close()
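
A hedged usage sketch: the sort/filter validation above makes a call like this safe, provided the filter keys are real columns:

# Ten most recently completed downloads that ended in an error.
rows = get_history_entries(
    limit=10,
    offset=0,
    sort_by="timestamp_completed",
    sort_order="DESC",
    filters={"status_final": "ERROR"},
)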
if __name__ == "__main__":
    # For testing purposes
    logging.basicConfig(level=logging.INFO)
    init_history_db()
    sample_data_complete = {
        "task_id": "test_task_123",
        "download_type": "track",
        "item_name": "Test Song",
        "item_artist": "Test Artist",
        "item_album": "Test Album",
        "item_url": "http://spotify.com/track/123",
        "spotify_id": "123",
        "status_final": "COMPLETED",
        "error_message": None,
        "timestamp_added": time.time() - 3600,
        "timestamp_completed": time.time(),
        "original_request_json": json.dumps({"param1": "value1"}),
        "last_status_obj_json": json.dumps(
            {"status": "complete", "message": "Finished!"}
        ),
        "service_used": "Spotify (Primary)",
        "quality_profile": "NORMAL",
        "convert_to": None,
        "bitrate": None,
    }
    add_entry_to_history(sample_data_complete)
sample_data_error = { sample_data_error = {
'task_id': 'test_task_456', "task_id": "test_task_456",
'download_type': 'album', "download_type": "album",
'item_name': 'Another Album', "item_name": "Another Album",
'item_artist': 'Another Artist', "item_artist": "Another Artist",
'item_album': 'Another Album', # For albums, item_name and item_album are often the same "item_album": "Another Album", # For albums, item_name and item_album are often the same
'item_url': 'http://spotify.com/album/456', "item_url": "http://spotify.com/album/456",
'spotify_id': '456', "spotify_id": "456",
'status_final': 'ERROR', "status_final": "ERROR",
'error_message': 'Download failed due to network issue.', "error_message": "Download failed due to network issue.",
'timestamp_added': time.time() - 7200, "timestamp_added": time.time() - 7200,
'timestamp_completed': time.time() - 60, "timestamp_completed": time.time() - 60,
'original_request_json': json.dumps({'param2': 'value2'}), "original_request_json": json.dumps({"param2": "value2"}),
'last_status_obj_json': json.dumps({'status': 'error', 'error': 'Network issue'}), "last_status_obj_json": json.dumps(
'service_used': 'Deezer', {"status": "error", "error": "Network issue"}
'quality_profile': 'MP3_320', ),
'convert_to': 'mp3', "service_used": "Deezer",
'bitrate': '320' "quality_profile": "MP3_320",
"convert_to": "mp3",
"bitrate": "320",
} }
add_entry_to_history(sample_data_error) add_entry_to_history(sample_data_error)
# Test updating an entry # Test updating an entry
updated_data_complete = { updated_data_complete = {
'task_id': 'test_task_123', "task_id": "test_task_123",
'download_type': 'track', "download_type": "track",
'item_name': 'Test Song (Updated)', "item_name": "Test Song (Updated)",
'item_artist': 'Test Artist', "item_artist": "Test Artist",
'item_album': 'Test Album II', "item_album": "Test Album II",
'item_url': 'http://spotify.com/track/123', "item_url": "http://spotify.com/track/123",
'spotify_id': '123', "spotify_id": "123",
'status_final': 'COMPLETED', "status_final": "COMPLETED",
'error_message': None, "error_message": None,
'timestamp_added': time.time() - 3600, "timestamp_added": time.time() - 3600,
'timestamp_completed': time.time() + 100, # Updated completion time "timestamp_completed": time.time() + 100, # Updated completion time
'original_request_json': json.dumps({'param1': 'value1', 'new_param': 'added'}), "original_request_json": json.dumps({"param1": "value1", "new_param": "added"}),
'last_status_obj_json': json.dumps({'status': 'complete', 'message': 'Finished! With update.'}), "last_status_obj_json": json.dumps(
'service_used': 'Spotify (Deezer Fallback)', {"status": "complete", "message": "Finished! With update."}
'quality_profile': 'HIGH', ),
'convert_to': 'flac', "service_used": "Spotify (Deezer Fallback)",
'bitrate': None "quality_profile": "HIGH",
"convert_to": "flac",
"bitrate": None,
} }
add_entry_to_history(updated_data_complete) add_entry_to_history(updated_data_complete)
@@ -310,13 +376,17 @@ if __name__ == '__main__':
        print(entry)

    print("\nFetching history entries (sorted by item_name ASC, limit 2, offset 1):")
    entries_sorted, total_sorted = get_history_entries(
        limit=2, offset=1, sort_by="item_name", sort_order="ASC"
    )
    print(f"Total entries (should be same as above): {total_sorted}")
    for entry in entries_sorted:
        print(entry)

    print("\nFetching history entries with filter (status_final = COMPLETED):")
    entries_filtered, total_filtered = get_history_entries(
        filters={"status_final": "COMPLETED"}
    )
    print(f"Total COMPLETED entries: {total_filtered}")
    for entry in entries_filtered:
        print(entry)
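
The hunk markers above elide the query assembly itself. As a rough sketch of the pattern those hunks format (the download_history table name comes from the docstring; the helper name and exact body are assumptions about the elided code, not a verbatim excerpt), the filter whitelist and the validated ORDER BY compose like this:

    def build_history_query(filters, sort_by, sort_order):
        # Values travel through "?" placeholders; only whitelisted identifiers
        # are ever interpolated into the SQL string itself.
        query = "SELECT * FROM download_history"
        params = []
        where_clauses = []
        for column, value in (filters or {}).items():
            if column.replace("_", "").isalnum():  # identifier whitelist
                where_clauses.append(f"{column} = ?")
                params.append(value)
        if where_clauses:
            query += " WHERE " + " AND ".join(where_clauses)
        # sort_by and sort_order are assumed pre-validated against the
        # valid_sort_columns / ASC-DESC whitelists shown above.
        query += f" ORDER BY {sort_by} {sort_order} LIMIT ? OFFSET ?"
        return query, params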

View File

@@ -1,11 +1,9 @@
import traceback
from deezspot.spotloader import SpoLogin
from deezspot.deezloader import DeeLogin
from pathlib import Path
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds


def download_playlist(
    url,
@@ -23,51 +21,63 @@ def download_playlist(
    max_retries=3,
    progress_callback=None,
    convert_to=None,
    bitrate=None,
):
    try:
        # Detect URL source (Spotify or Deezer) from URL
        is_spotify_url = "open.spotify.com" in url.lower()
        is_deezer_url = "deezer.com" in url.lower()

        service = ""
        if is_spotify_url:
            service = "spotify"
        elif is_deezer_url:
            service = "deezer"
        else:
            error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com"
            print(f"ERROR: {error_msg}")
            raise ValueError(error_msg)

        print(f"DEBUG: playlist.py - Service determined from URL: {service}")
        print(
            f"DEBUG: playlist.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'"
        )

        # Get global Spotify API credentials
        global_spotify_client_id, global_spotify_client_secret = (
            _get_global_spotify_api_creds()
        )
        if not global_spotify_client_id or not global_spotify_client_secret:
            warning_msg = "WARN: playlist.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail."
            print(warning_msg)

        if service == "spotify":
            if fallback:  # Fallback is a Deezer account name for a Spotify URL
                if quality is None:
                    quality = "FLAC"  # Deezer quality for first attempt
                if fall_quality is None:
                    fall_quality = (
                        "HIGH"  # Spotify quality for fallback (if Deezer fails)
                    )

                deezer_error = None
                try:
                    # Attempt 1: Deezer via download_playlistspo (using 'fallback' as Deezer account name)
                    print(
                        f"DEBUG: playlist.py - Spotify URL. Attempt 1: Deezer (account: {fallback})"
                    )
                    deezer_fallback_creds = get_credential("deezer", fallback)
                    arl = deezer_fallback_creds.get("arl")
                    if not arl:
                        raise ValueError(
                            f"ARL not found for Deezer account '{fallback}'."
                        )
                    dl = DeeLogin(
                        arl=arl,
                        spotify_client_id=global_spotify_client_id,
                        spotify_client_secret=global_spotify_client_secret,
                        progress_callback=progress_callback,
                    )
                    dl.download_playlistspo(
                        link_playlist=url,  # Spotify URL
@@ -85,30 +95,45 @@ def download_playlist(
                        retry_delay_increase=retry_delay_increase,
                        max_retries=max_retries,
                        convert_to=convert_to,
                        bitrate=bitrate,
                    )
                    print(
                        f"DEBUG: playlist.py - Playlist download via Deezer (account: {fallback}) successful for Spotify URL."
                    )
                except Exception as e:
                    deezer_error = e
                    print(
                        f"ERROR: playlist.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}"
                    )
                    traceback.print_exc()
                    print(
                        f"DEBUG: playlist.py - Attempting Spotify direct download (account: {main} for blob)..."
                    )

                    # Attempt 2: Spotify direct via download_playlist (using 'main' as Spotify account for blob)
                    try:
                        if (
                            not global_spotify_client_id
                            or not global_spotify_client_secret
                        ):
                            raise ValueError(
                                "Global Spotify API credentials (client_id/secret) not configured for Spotify download."
                            )
                        spotify_main_creds = get_credential(
                            "spotify", main
                        )  # For blob path
                        blob_file_path = spotify_main_creds.get("blob_file_path")
                        if not Path(blob_file_path).exists():
                            raise FileNotFoundError(
                                f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'"
                            )
                        spo = SpoLogin(
                            credentials_path=blob_file_path,
                            spotify_client_id=global_spotify_client_id,
                            spotify_client_secret=global_spotify_client_secret,
                            progress_callback=progress_callback,
                        )
                        spo.download_playlist(
                            link_playlist=url,  # Spotify URL
@@ -127,33 +152,44 @@ def download_playlist(
                            retry_delay_increase=retry_delay_increase,
                            max_retries=max_retries,
                            convert_to=convert_to,
                            bitrate=bitrate,
                        )
                        print(
                            f"DEBUG: playlist.py - Spotify direct download (account: {main} for blob) successful."
                        )
                    except Exception as e2:
                        print(
                            f"ERROR: playlist.py - Spotify direct download (account: {main} for blob) also failed: {e2}"
                        )
                        raise RuntimeError(
                            f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. "
                            f"Deezer error: {deezer_error}, Spotify error: {e2}"
                        ) from e2
            else:
                # Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob)
                if quality is None:
                    quality = "HIGH"  # Default Spotify quality
                print(
                    f"DEBUG: playlist.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}"
                )
                if not global_spotify_client_id or not global_spotify_client_secret:
                    raise ValueError(
                        "Global Spotify API credentials (client_id/secret) not configured for Spotify download."
                    )
                spotify_main_creds = get_credential("spotify", main)  # For blob path
                blob_file_path = spotify_main_creds.get("blob_file_path")
                if not Path(blob_file_path).exists():
                    raise FileNotFoundError(
                        f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'"
                    )
                spo = SpoLogin(
                    credentials_path=blob_file_path,
                    spotify_client_id=global_spotify_client_id,
                    spotify_client_secret=global_spotify_client_secret,
                    progress_callback=progress_callback,
                )
                spo.download_playlist(
                    link_playlist=url,
@@ -172,16 +208,21 @@ def download_playlist(
                    retry_delay_increase=retry_delay_increase,
                    max_retries=max_retries,
                    convert_to=convert_to,
                    bitrate=bitrate,
                )
                print(
                    f"DEBUG: playlist.py - Direct Spotify download (account: {main} for blob) successful."
                )
        elif service == "deezer":
            # Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL)
            if quality is None:
                quality = "FLAC"  # Default Deezer quality
            print(
                f"DEBUG: playlist.py - Deezer URL. Direct download with Deezer account: {main}"
            )
            deezer_main_creds = get_credential("deezer", main)  # For ARL
            arl = deezer_main_creds.get("arl")
            if not arl:
                raise ValueError(f"ARL not found for Deezer account '{main}'.")
@@ -189,7 +230,7 @@ def download_playlist(
                arl=arl,  # Account specific ARL
                spotify_client_id=global_spotify_client_id,  # Global Spotify keys
                spotify_client_secret=global_spotify_client_secret,  # Global Spotify keys
                progress_callback=progress_callback,
            )
            dl.download_playlistdee(  # Deezer URL, download via Deezer
                link_playlist=url,
@@ -206,9 +247,11 @@ def download_playlist(
                retry_delay_increase=retry_delay_increase,
                max_retries=max_retries,
                convert_to=convert_to,
                bitrate=bitrate,
            )
            print(
                f"DEBUG: playlist.py - Direct Deezer download (account: {main}) successful."
            )
        else:
            # Should be caught by initial service check, but as a safeguard
            raise ValueError(f"Unsupported service determined: {service}")

View File

@@ -1,50 +1,58 @@
from deezspot.easy_spoty import Spo
import logging
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds

# Configure logger
logger = logging.getLogger(__name__)


def search(query: str, search_type: str, limit: int = 3, main: str = None) -> dict:
    logger.info(
        f"Search requested: query='{query}', type={search_type}, limit={limit}, main_account_name={main}"
    )

    client_id, client_secret = _get_global_spotify_api_creds()

    if not client_id or not client_secret:
        logger.error(
            "Global Spotify API client_id or client_secret not configured in ./data/creds/search.json."
        )
        raise ValueError(
            "Spotify API credentials are not configured globally for search."
        )

    if main:
        logger.debug(
            f"Spotify account context '{main}' was provided for search. API keys are global, but this account might be used for other context by Spo if relevant."
        )
        try:
            get_credential("spotify", main)
            logger.debug(f"Spotify account '{main}' exists.")
        except FileNotFoundError:
            logger.warning(
                f"Spotify account '{main}' provided for search context not found in credentials. Search will proceed with global API keys."
            )
        except Exception as e:
            logger.warning(
                f"Error checking existence of Spotify account '{main}': {e}. Search will proceed with global API keys."
            )
    else:
        logger.debug(
            "No specific 'main' account context provided for search. Using global API keys."
        )

    logger.debug("Initializing Spotify client with global API credentials for search.")
    Spo.__init__(client_id, client_secret)

    logger.debug(
        f"Executing Spotify search with query='{query}', type={search_type}, limit={limit}"
    )
    try:
        spotify_response = Spo.search(query=query, search_type=search_type, limit=limit)
        logger.info(f"Search completed successfully for query: '{query}'")
        return spotify_response
    except Exception as e:
        logger.error(
            f"Error during Spotify search for query '{query}': {e}", exc_info=True
        )
        raise
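
Assuming Spo.search passes the raw Spotify Web API search payload through unchanged (the function returns it as-is), a call site inside this module would look roughly like:

    results = search(query="daft punk", search_type="track", limit=5)
    # Track results live under the standard Spotify payload shape.
    for item in results.get("tracks", {}).get("items", []):
        print(item.get("name"))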

View File

@@ -1,11 +1,12 @@
import traceback
from deezspot.spotloader import SpoLogin
from deezspot.deezloader import DeeLogin
from routes.utils.credentials import (
    get_credential,
    _get_global_spotify_api_creds,
    get_spotify_blob_path,
)


def download_track(
    url,
@@ -23,28 +24,32 @@ def download_track(
    max_retries=3,
    progress_callback=None,
    convert_to=None,
    bitrate=None,
):
    try:
        # Detect URL source (Spotify or Deezer) from URL
        is_spotify_url = "open.spotify.com" in url.lower()
        is_deezer_url = "deezer.com" in url.lower()

        service = ""
        if is_spotify_url:
            service = "spotify"
        elif is_deezer_url:
            service = "deezer"
        else:
            error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com"
            print(f"ERROR: {error_msg}")
            raise ValueError(error_msg)

        print(f"DEBUG: track.py - Service determined from URL: {service}")
        print(
            f"DEBUG: track.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'"
        )

        # Get global Spotify API credentials for SpoLogin and DeeLogin (if it uses Spotify search)
        global_spotify_client_id, global_spotify_client_secret = (
            _get_global_spotify_api_creds()
        )
        if not global_spotify_client_id or not global_spotify_client_secret:
            # This is a critical failure if Spotify operations are involved
            warning_msg = "WARN: track.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail."
@@ -52,25 +57,33 @@ def download_track(
            # Depending on flow, might want to raise error here if service is 'spotify'
            # For now, let it proceed and fail at SpoLogin/DeeLogin init if keys are truly needed and missing.

        if service == "spotify":
            if fallback:  # Fallback is a Deezer account name for a Spotify URL
                if quality is None:
                    quality = "FLAC"  # Deezer quality for first attempt
                if fall_quality is None:
                    fall_quality = (
                        "HIGH"  # Spotify quality for fallback (if Deezer fails)
                    )

                deezer_error = None
                try:
                    # Attempt 1: Deezer via download_trackspo (using 'fallback' as Deezer account name)
                    print(
                        f"DEBUG: track.py - Spotify URL. Attempt 1: Deezer (account: {fallback})"
                    )
                    deezer_fallback_creds = get_credential("deezer", fallback)
                    arl = deezer_fallback_creds.get("arl")
                    if not arl:
                        raise ValueError(
                            f"ARL not found for Deezer account '{fallback}'."
                        )
                    dl = DeeLogin(
                        arl=arl,
                        spotify_client_id=global_spotify_client_id,  # Global creds
                        spotify_client_secret=global_spotify_client_secret,  # Global creds
                        progress_callback=progress_callback,
                    )
                    # download_trackspo means: Spotify URL, download via Deezer
                    dl.download_trackspo(
@@ -87,30 +100,47 @@ def download_track(
                        retry_delay_increase=retry_delay_increase,
                        max_retries=max_retries,
                        convert_to=convert_to,
                        bitrate=bitrate,
                    )
                    print(
                        f"DEBUG: track.py - Track download via Deezer (account: {fallback}) successful for Spotify URL."
                    )
                except Exception as e:
                    deezer_error = e
                    print(
                        f"ERROR: track.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}"
                    )
                    traceback.print_exc()
                    print(
                        f"DEBUG: track.py - Attempting Spotify direct download (account: {main})..."
                    )

                    # Attempt 2: Spotify direct via download_track (using 'main' as Spotify account for blob)
                    try:
                        if (
                            not global_spotify_client_id
                            or not global_spotify_client_secret
                        ):
                            raise ValueError(
                                "Global Spotify API credentials (client_id/secret) not configured for Spotify download."
                            )
                        # Use get_spotify_blob_path directly
                        blob_file_path = get_spotify_blob_path(main)
                        if (
                            not blob_file_path.exists()
                        ):  # Check existence on the Path object
                            raise FileNotFoundError(
                                f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'"
                            )
                        spo = SpoLogin(
                            credentials_path=str(
                                blob_file_path
                            ),  # Account specific blob
                            spotify_client_id=global_spotify_client_id,  # Global API keys
                            spotify_client_secret=global_spotify_client_secret,  # Global API keys
                            progress_callback=progress_callback,
                        )
                        spo.download_track(
                            link_track=url,  # Spotify URL
@@ -128,33 +158,44 @@ def download_track(
                            retry_delay_increase=retry_delay_increase,
                            max_retries=max_retries,
                            convert_to=convert_to,
                            bitrate=bitrate,
                        )
                        print(
                            f"DEBUG: track.py - Spotify direct download (account: {main} for blob) successful."
                        )
                    except Exception as e2:
                        print(
                            f"ERROR: track.py - Spotify direct download (account: {main} for blob) also failed: {e2}"
                        )
                        raise RuntimeError(
                            f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. "
                            f"Deezer error: {deezer_error}, Spotify error: {e2}"
                        ) from e2
            else:
                # Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob)
                if quality is None:
                    quality = "HIGH"  # Default Spotify quality
                print(
                    f"DEBUG: track.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}"
                )
                if not global_spotify_client_id or not global_spotify_client_secret:
                    raise ValueError(
                        "Global Spotify API credentials (client_id/secret) not configured for Spotify download."
                    )
                # Use get_spotify_blob_path directly
                blob_file_path = get_spotify_blob_path(main)
                if not blob_file_path.exists():  # Check existence on the Path object
                    raise FileNotFoundError(
                        f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'"
                    )
                spo = SpoLogin(
                    credentials_path=str(blob_file_path),  # Account specific blob
                    spotify_client_id=global_spotify_client_id,  # Global API keys
                    spotify_client_secret=global_spotify_client_secret,  # Global API keys
                    progress_callback=progress_callback,
                )
                spo.download_track(
                    link_track=url,
@@ -172,16 +213,21 @@ def download_track(
                    retry_delay_increase=retry_delay_increase,
                    max_retries=max_retries,
                    convert_to=convert_to,
                    bitrate=bitrate,
                )
                print(
                    f"DEBUG: track.py - Direct Spotify download (account: {main} for blob) successful."
                )
        elif service == "deezer":
            # Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL)
            if quality is None:
                quality = "FLAC"  # Default Deezer quality
            print(
                f"DEBUG: track.py - Deezer URL. Direct download with Deezer account: {main}"
            )
            deezer_main_creds = get_credential("deezer", main)  # For ARL
            arl = deezer_main_creds.get("arl")
            if not arl:
                raise ValueError(f"ARL not found for Deezer account '{main}'.")
@@ -189,7 +235,7 @@ def download_track(
                arl=arl,  # Account specific ARL
                spotify_client_id=global_spotify_client_id,  # Global Spotify keys for internal Spo use by DeeLogin
                spotify_client_secret=global_spotify_client_secret,  # Global Spotify keys
                progress_callback=progress_callback,
            )
            dl.download_trackdee(  # Deezer URL, download via Deezer
                link_track=url,
@@ -205,12 +251,14 @@ def download_track(
                retry_delay_increase=retry_delay_increase,
                max_retries=max_retries,
                convert_to=convert_to,
                bitrate=bitrate,
            )
            print(
                f"DEBUG: track.py - Direct Deezer download (account: {main}) successful."
            )
        else:
            # Should be caught by initial service check, but as a safeguard
            raise ValueError(f"Unsupported service determined: {service}")
    except Exception:
        traceback.print_exc()
        raise
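
The URL sniffing at the top of download_track is duplicated verbatim in playlist.py above; a shared helper would be a natural follow-up to this formatting pass. A sketch of what that could look like (detect_service does not exist in the codebase; it is an illustration only):

    def detect_service(url: str) -> str:
        """Return "spotify" or "deezer" based on the URL host, else raise ValueError."""
        lowered = url.lower()
        if "open.spotify.com" in lowered:
            return "spotify"
        if "deezer.com" in lowered:
            return "deezer"
        raise ValueError("Invalid URL: Must be from open.spotify.com or deezer.com")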

File diff suppressed because it is too large

View File

@@ -3,6 +3,7 @@ import threading
import logging
import json
from pathlib import Path
from typing import Any, List, Dict

from routes.utils.watch.db import (
@@ -12,18 +13,18 @@ from routes.utils.watch.db import (
    update_playlist_snapshot,
    mark_tracks_as_not_present_in_spotify,
    # Artist watch DB functions
    get_watched_artists,
    get_watched_artist,
    get_artist_album_ids_from_db,
    update_artist_metadata_after_check,  # Renamed from update_artist_metadata
)
from routes.utils.get_info import (
    get_spotify_info,
)  # To fetch playlist, track, artist, and album details
from routes.utils.celery_queue_manager import download_queue_manager

logger = logging.getLogger(__name__)

CONFIG_FILE_PATH = Path("./data/config/watch.json")
STOP_EVENT = threading.Event()

DEFAULT_WATCH_CONFIG = {
@@ -32,9 +33,10 @@ DEFAULT_WATCH_CONFIG = {
"max_tracks_per_run": 50, # For playlists "max_tracks_per_run": 50, # For playlists
"watchedArtistAlbumGroup": ["album", "single"], # Default for artists "watchedArtistAlbumGroup": ["album", "single"], # Default for artists
"delay_between_playlists_seconds": 2, "delay_between_playlists_seconds": 2,
"delay_between_artists_seconds": 5 # Added for artists "delay_between_artists_seconds": 5, # Added for artists
} }
def get_watch_config(): def get_watch_config():
"""Loads the watch configuration from watch.json. """Loads the watch configuration from watch.json.
Creates the file with defaults if it doesn't exist. Creates the file with defaults if it doesn't exist.
@@ -45,12 +47,14 @@ def get_watch_config():
        CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True)
        if not CONFIG_FILE_PATH.exists():
            logger.info(
                f"{CONFIG_FILE_PATH} not found. Creating with default watch config."
            )
            with open(CONFIG_FILE_PATH, "w") as f:
                json.dump(DEFAULT_WATCH_CONFIG, f, indent=2)
            return DEFAULT_WATCH_CONFIG.copy()

        with open(CONFIG_FILE_PATH, "r") as f:
            config = json.load(f)

        updated = False
@@ -60,28 +64,39 @@ def get_watch_config():
                updated = True

        if updated:
            logger.info(
                f"Watch configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults."
            )
            with open(CONFIG_FILE_PATH, "w") as f:
                json.dump(config, f, indent=2)

        return config
    except Exception as e:
        logger.error(
            f"Error loading or creating watch config at {CONFIG_FILE_PATH}: {e}",
            exc_info=True,
        )
        return DEFAULT_WATCH_CONFIG.copy()  # Fallback
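
The hunk elides the loop between json.load and the `updated = True` visible above; the pattern is the usual merge-with-defaults, roughly (a sketch of the elided code, not a verbatim excerpt):

    for key, default_value in DEFAULT_WATCH_CONFIG.items():
        if key not in config:
            config[key] = default_value
            updated = True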


def construct_spotify_url(item_id, item_type="track"):
    return f"https://open.spotify.com/{item_type}/{item_id}"


def check_watched_playlists(specific_playlist_id: str = None):
    """Checks watched playlists for new tracks and queues downloads.

    If specific_playlist_id is provided, only that playlist is checked.
    """
    logger.info(
        f"Playlist Watch Manager: Starting check. Specific playlist: {specific_playlist_id or 'All'}"
    )
    config = get_watch_config()

    if specific_playlist_id:
        playlist_obj = get_watched_playlist(specific_playlist_id)
        if not playlist_obj:
            logger.error(
                f"Playlist Watch Manager: Playlist {specific_playlist_id} not found in watch database."
            )
            return
        watched_playlists_to_check = [playlist_obj]
    else:
@@ -92,19 +107,30 @@ def check_watched_playlists(specific_playlist_id: str = None):
        return

    for playlist_in_db in watched_playlists_to_check:
        playlist_spotify_id = playlist_in_db["spotify_id"]
        playlist_name = playlist_in_db["name"]
        logger.info(
            f"Playlist Watch Manager: Checking playlist '{playlist_name}' ({playlist_spotify_id})..."
        )

        try:
            # For playlists, we fetch all tracks in one go usually (Spotify API limit permitting)
            current_playlist_data_from_api = get_spotify_info(
                playlist_spotify_id, "playlist"
            )
            if (
                not current_playlist_data_from_api
                or "tracks" not in current_playlist_data_from_api
            ):
                logger.error(
                    f"Playlist Watch Manager: Failed to fetch data or tracks from Spotify for playlist {playlist_spotify_id}."
                )
                continue

            api_snapshot_id = current_playlist_data_from_api.get("snapshot_id")
            api_total_tracks = current_playlist_data_from_api.get("tracks", {}).get(
                "total", 0
            )

            # Paginate through playlist tracks if necessary
            all_api_track_items = []
@@ -121,16 +147,21 @@ def check_watched_playlists(specific_playlist_id: str = None):
                # If it doesn't, this part would need adjustment for robust pagination.
                # For now, we use the items from the initial fetch.
                paginated_playlist_data = get_spotify_info(
                    playlist_spotify_id, "playlist", offset=offset, limit=limit
                )
                if (
                    not paginated_playlist_data
                    or "tracks" not in paginated_playlist_data
                ):
                    break

                page_items = paginated_playlist_data.get("tracks", {}).get("items", [])
                if not page_items:
                    break

                all_api_track_items.extend(page_items)

                if paginated_playlist_data.get("tracks", {}).get("next"):
                    offset += limit
                else:
                    break
@@ -138,9 +169,9 @@ def check_watched_playlists(specific_playlist_id: str = None):
            current_api_track_ids = set()
            api_track_id_to_item_map = {}
            for item in all_api_track_items:  # Use all_api_track_items
                track = item.get("track")
                if track and track.get("id") and not track.get("is_local"):
                    track_id = track["id"]
                    current_api_track_ids.add(track_id)
                    api_track_id_to_item_map[track_id] = item
@@ -149,74 +180,118 @@ def check_watched_playlists(specific_playlist_id: str = None):
            new_track_ids_for_download = current_api_track_ids - db_track_ids

            queued_for_download_count = 0
            if new_track_ids_for_download:
                logger.info(
                    f"Playlist Watch Manager: Found {len(new_track_ids_for_download)} new tracks for playlist '{playlist_name}' to download."
                )
                for track_id in new_track_ids_for_download:
                    api_item = api_track_id_to_item_map.get(track_id)
                    if not api_item or not api_item.get("track"):
                        logger.warning(
                            f"Playlist Watch Manager: Missing track details in API map for new track_id {track_id} in playlist {playlist_spotify_id}. Cannot queue."
                        )
                        continue

                    track_to_queue = api_item["track"]
                    task_payload = {
                        "download_type": "track",
                        "url": construct_spotify_url(track_id, "track"),
                        "name": track_to_queue.get("name", "Unknown Track"),
                        "artist": ", ".join(
                            [
                                a["name"]
                                for a in track_to_queue.get("artists", [])
                                if a.get("name")
                            ]
                        ),
                        "orig_request": {
                            "source": "playlist_watch",
                            "playlist_id": playlist_spotify_id,
                            "playlist_name": playlist_name,
                            "track_spotify_id": track_id,
                            "track_item_for_db": api_item,  # Pass full API item for DB update on completion
                        },
                        # "track_details_for_db" was old name, using track_item_for_db consistent with celery_tasks
                    }
                    try:
                        task_id_or_none = download_queue_manager.add_task(
                            task_payload, from_watch_job=True
                        )
                        if task_id_or_none:  # Task was newly queued
                            logger.info(
                                f"Playlist Watch Manager: Queued download task {task_id_or_none} for new track {track_id} ('{track_to_queue.get('name')}') from playlist '{playlist_name}'."
                            )
                            queued_for_download_count += 1
                        # If task_id_or_none is None, it was a duplicate and not re-queued, Celery manager handles logging.
                    except Exception as e:
                        logger.error(
                            f"Playlist Watch Manager: Failed to queue download for new track {track_id} from playlist '{playlist_name}': {e}",
                            exc_info=True,
                        )
                logger.info(
                    f"Playlist Watch Manager: Attempted to queue {queued_for_download_count} new tracks for playlist '{playlist_name}'."
                )
            else:
                logger.info(
                    f"Playlist Watch Manager: No new tracks to download for playlist '{playlist_name}'."
                )

            # Update DB for tracks that are still present in API (e.g. update 'last_seen_in_spotify')
            # add_tracks_to_playlist_db handles INSERT OR REPLACE, updating existing entries.
            # We should pass all current API tracks to ensure their `last_seen_in_spotify` and `is_present_in_spotify` are updated.
            if (
                all_api_track_items
            ):  # If there are any tracks in the API for this playlist
                logger.info(
                    f"Playlist Watch Manager: Refreshing {len(all_api_track_items)} tracks from API in local DB for playlist '{playlist_name}'."
                )
                add_tracks_to_playlist_db(playlist_spotify_id, all_api_track_items)

            removed_db_ids = db_track_ids - current_api_track_ids
            if removed_db_ids:
                logger.info(
                    f"Playlist Watch Manager: {len(removed_db_ids)} tracks removed from Spotify playlist '{playlist_name}'. Marking in DB."
                )
                mark_tracks_as_not_present_in_spotify(
                    playlist_spotify_id, list(removed_db_ids)
                )

            update_playlist_snapshot(
                playlist_spotify_id, api_snapshot_id, api_total_tracks
            )  # api_total_tracks from initial fetch
            logger.info(
                f"Playlist Watch Manager: Finished checking playlist '{playlist_name}'. Snapshot ID updated. API Total Tracks: {api_total_tracks}."
            )

        except Exception as e:
            logger.error(
                f"Playlist Watch Manager: Error processing playlist {playlist_spotify_id}: {e}",
                exc_info=True,
            )

        time.sleep(max(1, config.get("delay_between_playlists_seconds", 2)))
    logger.info("Playlist Watch Manager: Finished checking all watched playlists.")


def check_watched_artists(specific_artist_id: str = None):
    """Checks watched artists for new albums and queues downloads."""
    logger.info(
        f"Artist Watch Manager: Starting check. Specific artist: {specific_artist_id or 'All'}"
    )
    config = get_watch_config()
    watched_album_groups = [
        g.lower() for g in config.get("watchedArtistAlbumGroup", ["album", "single"])
    ]
    logger.info(
        f"Artist Watch Manager: Watching for album groups: {watched_album_groups}"
    )

    if specific_artist_id:
        artist_obj_in_db = get_watched_artist(specific_artist_id)
        if not artist_obj_in_db:
            logger.error(
                f"Artist Watch Manager: Artist {specific_artist_id} not found in watch database."
            )
            return
        artists_to_check = [artist_obj_in_db]
    else:
@@ -227,9 +302,11 @@ def check_watched_artists(specific_artist_id: str = None):
        return

    for artist_in_db in artists_to_check:
        artist_spotify_id = artist_in_db["spotify_id"]
        artist_name = artist_in_db["name"]
        logger.info(
            f"Artist Watch Manager: Checking artist '{artist_name}' ({artist_spotify_id})..."
        )

        try:
            # Spotify API for artist albums is paginated.
@@ -237,58 +314,84 @@ def check_watched_artists(specific_artist_id: str = None):
# Let's assume get_spotify_info(artist_id, 'artist-albums') returns a list of all album objects. # Let's assume get_spotify_info(artist_id, 'artist-albums') returns a list of all album objects.
# Or we implement pagination here. # Or we implement pagination here.
all_artist_albums_from_api = [] all_artist_albums_from_api: List[Dict[str, Any]] = []
            offset = 0
            limit = 50  # Spotify API page-size limit for artist albums
            while True:
                # The 'artist_discography' type for get_spotify_info must support
                # pagination params and return a page of album objects.
                logger.debug(
                    f"Artist Watch Manager: Fetching albums for {artist_spotify_id}. Limit: {limit}, Offset: {offset}"
                )
                artist_albums_page = get_spotify_info(
                    artist_spotify_id, "artist_discography", limit=limit, offset=offset
                )

                if not artist_albums_page or not isinstance(
                    artist_albums_page.get("items"), list
                ):
                    logger.warning(
                        f"Artist Watch Manager: No album items found or invalid format for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Response: {artist_albums_page}"
                    )
                    break

                current_page_albums = artist_albums_page.get("items", [])
                if not current_page_albums:
                    logger.info(
                        f"Artist Watch Manager: No more albums on page for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Total fetched so far: {len(all_artist_albums_from_api)}."
                    )
                    break

                logger.debug(
                    f"Artist Watch Manager: Fetched {len(current_page_albums)} albums on current page for artist '{artist_name}'."
                )
                all_artist_albums_from_api.extend(current_page_albums)

                # Correct pagination: the `next` field in Spotify API responses is a URL
                # to the next page, or null when there are no more pages.
                if artist_albums_page.get("next"):
                    offset += limit  # Increment offset by the limit used for the request
                else:
                    logger.info(
                        f"Artist Watch Manager: No 'next' page URL for artist '{artist_name}'. Pagination complete. Total albums fetched: {len(all_artist_albums_from_api)}."
                    )
                    break

            # total_albums_from_api = len(all_artist_albums_from_api)
            # Use the 'total' field from the API response for a more accurate count of
            # all available albums (matching the current API filter, if any).
            api_reported_total_albums = (
                artist_albums_page.get("total", 0)
                if "artist_albums_page" in locals() and artist_albums_page
                else len(all_artist_albums_from_api)
            )
            logger.info(
                f"Artist Watch Manager: Fetched {len(all_artist_albums_from_api)} albums in total from API for artist '{artist_name}'. API reports total: {api_reported_total_albums}."
            )

            db_album_ids = get_artist_album_ids_from_db(artist_spotify_id)
            logger.info(
                f"Artist Watch Manager: Found {len(db_album_ids)} albums in DB for artist '{artist_name}'. These will be skipped if re-encountered unless logic changes."
            )

            queued_for_download_count = 0
            # Guards against duplicate album IDs if the API repeats them across pages
            # (should not happen with correct pagination).
            processed_album_ids_in_run = set()

            for album_data in all_artist_albums_from_api:
                album_id = album_data.get("id")
                album_name = album_data.get("name", "Unknown Album")
                album_group = album_data.get("album_group", "N/A").lower()
                album_type = album_data.get("album_type", "N/A").lower()

                if not album_id:
                    logger.warning(
                        f"Artist Watch Manager: Skipping album without ID for artist '{artist_name}'. Album data: {album_data}"
                    )
                    continue

                if album_id in processed_album_ids_in_run:
                    logger.debug(
                        f"Artist Watch Manager: Album '{album_name}' ({album_id}) already processed in this run. Skipping."
                    )
                    continue
                processed_album_ids_in_run.add(album_id)
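The pagination loop above can be distilled into a reusable generator. A minimal sketch, assuming a fetcher with the same shape as get_spotify_info, i.e. one that returns Spotify's standard paging object with "items", "next", and "total" keys; the helper name iter_paged_items is illustrative, not part of the codebase:

from typing import Callable, Iterator


def iter_paged_items(
    fetch_page: Callable[[int, int], dict], limit: int = 50
) -> Iterator[dict]:
    """Yield items from a Spotify-style paged endpoint until 'next' is null.

    fetch_page(limit, offset) is assumed to return a dict with an 'items'
    list and a 'next' URL (or None), mirroring Spotify's paging object.
    """
    offset = 0
    while True:
        page = fetch_page(limit, offset)
        items = page.get("items") if page else None
        if not isinstance(items, list) or not items:
            return  # malformed response or empty page: stop cleanly
        yield from items
        if not page.get("next"):
            return  # no next-page URL means pagination is complete
        offset += limit  # advance by the limit actually used for the request


# Hypothetical usage against the fetcher above:
# albums = list(
#     iter_paged_items(
#         lambda lim, off: get_spotify_info(
#             artist_spotify_id, "artist_discography", limit=lim, offset=off
#         )
#     )
# )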
@@ -296,19 +399,31 @@ def check_watched_artists(specific_artist_id: str = None):
                # The album_group field is generally preferred for this type of
                # categorization, as per the Spotify docs.
                is_matching_group = album_group in watched_album_groups
                logger.debug(
                    f"Artist '{artist_name}', Album '{album_name}' ({album_id}): album_group='{album_group}', album_type='{album_type}'. Watched groups: {watched_album_groups}. Match: {is_matching_group}."
                )

                if not is_matching_group:
                    logger.debug(
                        f"Artist Watch Manager: Skipping album '{album_name}' ({album_id}) by '{artist_name}' - group '{album_group}' not in watched list: {watched_album_groups}."
                    )
                    continue

                logger.info(
                    f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' (group: {album_group}) IS a matching group."
                )

                if album_id not in db_album_ids:
                    logger.info(
                        f"Artist Watch Manager: Found NEW matching album '{album_name}' ({album_id}) by '{artist_name}'. Queuing for download."
                    )

                    album_artists_list = album_data.get("artists", [])
                    album_main_artist_name = (
                        album_artists_list[0].get("name", "Unknown Artist")
                        if album_artists_list
                        else "Unknown Artist"
                    )

                    task_payload = {
                        "download_type": "album",  # Or "track" if downloading individual album tracks later
@@ -320,8 +435,8 @@ def check_watched_artists(specific_artist_id: str = None):
"artist_spotify_id": artist_spotify_id, # Watched artist "artist_spotify_id": artist_spotify_id, # Watched artist
"artist_name": artist_name, "artist_name": artist_name,
"album_spotify_id": album_id, "album_spotify_id": album_id,
"album_data_for_db": album_data # Pass full API album object for DB update on completion/queuing "album_data_for_db": album_data, # Pass full API album object for DB update on completion/queuing
} },
} }
try: try:
# Add to DB first with task_id, then queue. Or queue and add task_id to DB. # Add to DB first with task_id, then queue. Or queue and add task_id to DB.
@@ -332,38 +447,56 @@ def check_watched_artists(specific_artist_id: str = None):
                        # Task_id will be added if successfully queued.
                        # Call add_task first; if it returns a task_id (not a duplicate), then update our DB.
                        task_id_or_none = download_queue_manager.add_task(
                            task_payload, from_watch_job=True
                        )
                        if task_id_or_none:  # Task was newly queued
                            # REMOVED: add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=task_id_or_none, is_download_complete=False)
                            # The album will be added/updated in the DB by celery_tasks.py upon successful download completion.
                            logger.info(
                                f"Artist Watch Manager: Queued download task {task_id_or_none} for new album '{album_name}' from artist '{artist_name}'. DB entry will be created/updated on success."
                            )
                            queued_for_download_count += 1
                        # If task_id_or_none is None, it was a duplicate; the Celery manager handles logging.
                    except Exception as e:
                        logger.error(
                            f"Artist Watch Manager: Failed to queue download for new album {album_id} ('{album_name}') from artist '{artist_name}': {e}",
                            exc_info=True,
                        )
                else:
                    logger.info(
                        f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' already known in DB (ID found in db_album_ids). Skipping queue."
                    )
                    # Optionally, update its entry (e.g. last_seen, or if details changed); for now, only new albums are queued.
                    # add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=None, is_download_complete=False)  # would update added_to_db_at

            logger.info(
                f"Artist Watch Manager: For artist '{artist_name}', processed {len(all_artist_albums_from_api)} API albums, attempted to queue {queued_for_download_count} new albums."
            )

            update_artist_metadata_after_check(
                artist_spotify_id, api_reported_total_albums
            )
            logger.info(
                f"Artist Watch Manager: Finished checking artist '{artist_name}'. DB metadata updated. API reported total albums (for API filter): {api_reported_total_albums}."
            )

        except Exception as e:
            logger.error(
                f"Artist Watch Manager: Error processing artist {artist_spotify_id} ('{artist_name}'): {e}",
                exc_info=True,
            )

        time.sleep(max(1, config.get("delay_between_artists_seconds", 5)))

    logger.info("Artist Watch Manager: Finished checking all watched artists.")
def playlist_watch_scheduler():
    """Periodically calls check_watched_playlists and check_watched_artists."""
    logger.info("Watch Scheduler: Thread started.")
    while not STOP_EVENT.is_set():
        current_config = get_watch_config()  # Reload each loop so config changes take effect dynamically
@@ -371,8 +504,12 @@ def playlist_watch_scheduler():
        watch_enabled = current_config.get("enabled", False)  # Get enabled status

        if not watch_enabled:
            logger.info(
                "Watch Scheduler: Watch feature is disabled in config. Skipping checks."
            )
            STOP_EVENT.wait(interval)  # Still respect the poll interval before checking config again
            continue  # Skip to next iteration
@@ -380,41 +517,61 @@ def playlist_watch_scheduler():
            check_watched_playlists()
            logger.info("Watch Scheduler: Playlist check run completed.")
        except Exception as e:
            logger.error(
                f"Watch Scheduler: Unhandled exception during check_watched_playlists: {e}",
                exc_info=True,
            )

        # Add a small delay between playlist and artist checks if desired:
        # time.sleep(current_config.get("delay_between_check_types_seconds", 10))

        if STOP_EVENT.is_set():
            break  # Check the stop event again before starting the artist check

        try:
            logger.info("Watch Scheduler: Starting artist check run.")
            check_watched_artists()
            logger.info("Watch Scheduler: Artist check run completed.")
        except Exception as e:
            logger.error(
                f"Watch Scheduler: Unhandled exception during check_watched_artists: {e}",
                exc_info=True,
            )

        logger.info(
            f"Watch Scheduler: All checks complete. Next run in {interval} seconds."
        )
        STOP_EVENT.wait(interval)

    logger.info("Watch Scheduler: Thread stopped.")
# --- Global thread for the scheduler --- # --- Global thread for the scheduler ---
# --- Global thread for the scheduler ---
_watch_scheduler_thread = None  # Renamed from _playlist_watch_thread


def start_watch_manager():  # Renamed from start_playlist_watch_manager
    global _watch_scheduler_thread
    if _watch_scheduler_thread is None or not _watch_scheduler_thread.is_alive():
        STOP_EVENT.clear()
        # Initialize DBs on start
        from routes.utils.watch.db import (
            init_playlists_db,
            init_artists_db,
        )  # Updated import

        init_playlists_db()  # For playlists
        init_artists_db()  # For artists

        _watch_scheduler_thread = threading.Thread(
            target=playlist_watch_scheduler, daemon=True
        )
        _watch_scheduler_thread.start()
        logger.info(
            "Watch Manager: Background scheduler started (includes playlists and artists)."
        )
    else:
        logger.info("Watch Manager: Background scheduler already running.")


def stop_watch_manager():  # Renamed from stop_playlist_watch_manager
    global _watch_scheduler_thread
    if _watch_scheduler_thread and _watch_scheduler_thread.is_alive():
@@ -429,5 +586,6 @@ def stop_watch_manager(): # Renamed from stop_playlist_watch_manager
    else:
        logger.info("Watch Manager: Background scheduler not running.")


# If this module is imported and you want to auto-start the manager, you could call start_watch_manager() here.
# However, it's usually better to start it explicitly from the main application/__init__.py.
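The start/stop pair above is a guarded-singleton pattern for a daemon worker thread. A minimal sketch of the same shape, reusing the illustrative stop_event from the scheduler sketch earlier; the join timeout is an arbitrary choice, not a project constant:

import threading
from typing import Optional

_worker: Optional[threading.Thread] = None


def start_worker(target) -> None:
    """Start the background worker once; ignore repeated calls."""
    global _worker
    if _worker is None or not _worker.is_alive():
        stop_event.clear()  # allow the loop to run again after a prior stop
        _worker = threading.Thread(target=target, daemon=True)
        _worker.start()


def stop_worker(timeout: float = 10.0) -> None:
    """Signal the loop to exit and wait briefly for a clean shutdown."""
    if _worker and _worker.is_alive():
        stop_event.set()
        _worker.join(timeout)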

View File

@@ -1,5 +1,6 @@
<?xml version="1.0" encoding="utf-8"?> <?xml version="1.0" encoding="utf-8"?>
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg"> <svg width="800px" height="800px" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="Warning / Info"> <g id="Warning / Info">
<path id="Vector" d="M12 11V16M12 21C7.02944 21 3 16.9706 3 12C3 7.02944 7.02944 3 12 3C16.9706 3 21 7.02944 21 12C21 16.9706 16.9706 21 12 21ZM12.0498 8V8.1L11.9502 8.1002V8H12.0498Z" stroke="#000000" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/> <path id="Vector" d="M12 11V16M12 21C7.02944 21 3 16.9706 3 12C3 7.02944 7.02944 3 12 3C16.9706 3 21 7.02944 21 12C21 16.9706 16.9706 21 12 21ZM12.0498 8V8.1L11.9502 8.1002V8H12.0498Z" stroke="#000000" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>


View File

@@ -6,7 +6,8 @@
<g id="🔍-System-Icons" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd"> <g id="🔍-System-Icons" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="ic_fluent_missing_metadata_24_filled" fill="#212121" fill-rule="nonzero"> <g id="ic_fluent_missing_metadata_24_filled" fill="#212121" fill-rule="nonzero">
<path d="M17.5,12 C20.5376,12 23,14.4624 23,17.5 C23,20.5376 20.5376,23 17.5,23 C14.4624,23 12,20.5376 12,17.5 C12,14.4624 14.4624,12 17.5,12 Z M19.7501,2 C20.9927,2 22.0001,3.00736 22.0001,4.25 L22.0001,9.71196 C22.0001,10.50198 21.7124729,11.2623046 21.1951419,11.8530093 L21.0222,12.0361 C20.0073,11.3805 18.7981,11 17.5,11 C13.9101,11 11,13.9101 11,17.5 C11,18.7703 11.3644,19.9554 11.9943,20.9567 C10.7373,21.7569 9.05064,21.6098 7.95104,20.5143 L3.48934,16.0592 C2.21887,14.7913 2.21724,12.7334 3.48556,11.4632 L11.9852,2.95334 C12.5948,2.34297 13.4221,2 14.2847,2 L19.7501,2 Z M17.5,19.88 C17.1551,19.88 16.8755,20.1596 16.8755,20.5045 C16.8755,20.8494 17.1551,21.129 17.5,21.129 C17.8449,21.129 18.1245,20.8494 18.1245,20.5045 C18.1245,20.1596 17.8449,19.88 17.5,19.88 Z M17.5,14.0031 C16.4521,14.0031 15.6357,14.8205 15.6467,15.9574 C15.6493,16.2335 15.8753,16.4552 16.1514,16.4526 C16.4276,16.4499 16.6493,16.2239 16.6465901,15.9478 C16.6411,15.3688 17.0063,15.0031 17.5,15.0031 C17.9724,15.0031 18.3534,15.395 18.3534,15.9526 C18.3534,16.1448571 18.298151,16.2948694 18.1295283,16.5141003 L18.0355,16.63 L17.9365,16.7432 L17.6711,17.0333 C17.1868,17.5749 17,17.9255 17,18.5006 C17,18.7767 17.2239,19.0006 17.5,19.0006 C17.7762,19.0006 18,18.7767 18,18.5006 C18,18.297425 18.0585703,18.1416422 18.2388846,17.9103879 L18.3238,17.8063 L18.4247,17.6908 L18.6905,17.4003 C19.1682,16.866 19.3534,16.5186 19.3534,15.9526 C19.3534,14.8489 18.5311,14.0031 17.5,14.0031 Z M17,5.50218 C16.1716,5.50218 15.5001,6.17374 15.5001,7.00216 C15.5001,7.83057 16.1716,8.50213 17,8.50213 C17.8284,8.50213 18.5,7.83057 18.5,7.00216 C18.5,6.17374 17.8284,5.50218 17,5.50218 Z" id="🎨-Color"> <path d="M17.5,12 C20.5376,12 23,14.4624 23,17.5 C23,20.5376 20.5376,23 17.5,23 C14.4624,23 12,20.5376 12,17.5 C12,14.4624 14.4624,12 17.5,12 Z M19.7501,2 C20.9927,2 22.0001,3.00736 22.0001,4.25 L22.0001,9.71196 C22.0001,10.50198 21.7124729,11.2623046 21.1951419,11.8530093 L21.0222,12.0361 C20.0073,11.3805 18.7981,11 17.5,11 C13.9101,11 11,13.9101 11,17.5 C11,18.7703 11.3644,19.9554 11.9943,20.9567 C10.7373,21.7569 9.05064,21.6098 7.95104,20.5143 L3.48934,16.0592 C2.21887,14.7913 2.21724,12.7334 3.48556,11.4632 L11.9852,2.95334 C12.5948,2.34297 13.4221,2 14.2847,2 L19.7501,2 Z M17.5,19.88 C17.1551,19.88 16.8755,20.1596 16.8755,20.5045 C16.8755,20.8494 17.1551,21.129 17.5,21.129 C17.8449,21.129 18.1245,20.8494 18.1245,20.5045 C18.1245,20.1596 17.8449,19.88 17.5,19.88 Z M17.5,14.0031 C16.4521,14.0031 15.6357,14.8205 15.6467,15.9574 C15.6493,16.2335 15.8753,16.4552 16.1514,16.4526 C16.4276,16.4499 16.6493,16.2239 16.6465901,15.9478 C16.6411,15.3688 17.0063,15.0031 17.5,15.0031 C17.9724,15.0031 18.3534,15.395 18.3534,15.9526 C18.3534,16.1448571 18.298151,16.2948694 18.1295283,16.5141003 L18.0355,16.63 L17.9365,16.7432 L17.6711,17.0333 C17.1868,17.5749 17,17.9255 17,18.5006 C17,18.7767 17.2239,19.0006 17.5,19.0006 C17.7762,19.0006 18,18.7767 18,18.5006 C18,18.297425 18.0585703,18.1416422 18.2388846,17.9103879 L18.3238,17.8063 L18.4247,17.6908 L18.6905,17.4003 C19.1682,16.866 19.3534,16.5186 19.3534,15.9526 C19.3534,14.8489 18.5311,14.0031 17.5,14.0031 Z M17,5.50218 C16.1716,5.50218 15.5001,6.17374 15.5001,7.00216 C15.5001,7.83057 16.1716,8.50213 17,8.50213 C17.8284,8.50213 18.5,7.83057 18.5,7.00216 C18.5,6.17374 17.8284,5.50218 17,5.50218 Z" id="🎨-Color">
</path>
</g> </g>
</g> </g>
</svg> </svg>


View File

@@ -1,11 +1,11 @@
{
  "compilerOptions": {
    "target": "ES2017",
    "module": "ES2020",
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "outDir": "./static/js",
    "rootDir": "./src/js"
  },
@@ -20,6 +20,6 @@
"src/js/track.ts" "src/js/track.ts"
], ],
"exclude": [ "exclude": [
"node_modules" // Specifies an array of filenames or patterns that should be skipped when resolving include. "node_modules"
] ]
} }